1   package org.apache.lucene.index;
2   
3   /*
4    * Licensed to the Apache Software Foundation (ASF) under one or more
5    * contributor license agreements.  See the NOTICE file distributed with
6    * this work for additional information regarding copyright ownership.
7    * The ASF licenses this file to You under the Apache License, Version 2.0
8    * (the "License"); you may not use this file except in compliance with
9    * the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  import java.io.Closeable;
21  import java.io.FileNotFoundException;
22  import java.io.IOException;
23  import java.nio.file.NoSuchFileException;
24  import java.util.ArrayList;
25  import java.util.Arrays;
26  import java.util.Collection;
27  import java.util.Collections;
28  import java.util.Date;
29  import java.util.HashMap;
30  import java.util.HashSet;
31  import java.util.Iterator;
32  import java.util.LinkedList;
33  import java.util.List;
34  import java.util.Locale;
35  import java.util.Map.Entry;
36  import java.util.Map;
37  import java.util.Queue;
38  import java.util.Set;
39  import java.util.concurrent.atomic.AtomicInteger;
40  import java.util.concurrent.atomic.AtomicLong;
41  
42  import org.apache.lucene.analysis.Analyzer;
43  import org.apache.lucene.codecs.Codec;
44  import org.apache.lucene.codecs.FieldInfosFormat;
45  import org.apache.lucene.document.Field;
46  import org.apache.lucene.index.DocValuesUpdate.BinaryDocValuesUpdate;
47  import org.apache.lucene.index.DocValuesUpdate.NumericDocValuesUpdate;
48  import org.apache.lucene.index.FieldInfos.FieldNumbers;
49  import org.apache.lucene.index.IndexWriterConfig.OpenMode;
50  import org.apache.lucene.search.MatchAllDocsQuery;
51  import org.apache.lucene.search.Query;
52  import org.apache.lucene.store.AlreadyClosedException;
53  import org.apache.lucene.store.Directory;
54  import org.apache.lucene.store.FilterDirectory;
55  import org.apache.lucene.store.FlushInfo;
56  import org.apache.lucene.store.IOContext;
57  import org.apache.lucene.store.IndexOutput;
58  import org.apache.lucene.store.Lock;
59  import org.apache.lucene.store.LockObtainFailedException;
60  import org.apache.lucene.store.LockValidatingDirectoryWrapper;
61  import org.apache.lucene.store.MMapDirectory;
62  import org.apache.lucene.store.MergeInfo;
63  import org.apache.lucene.store.RateLimitedIndexOutput;
64  import org.apache.lucene.store.SleepingLockWrapper;
65  import org.apache.lucene.store.TrackingDirectoryWrapper;
66  import org.apache.lucene.util.Accountable;
67  import org.apache.lucene.util.Bits;
68  import org.apache.lucene.util.BytesRef;
69  import org.apache.lucene.util.CloseableThreadLocal;
70  import org.apache.lucene.util.Constants;
71  import org.apache.lucene.util.IOUtils;
72  import org.apache.lucene.util.InfoStream;
73  import org.apache.lucene.util.StringHelper;
74  import org.apache.lucene.util.ThreadInterruptedException;
75  import org.apache.lucene.util.Version;
76  
77  /**
78    An <code>IndexWriter</code> creates and maintains an index.
79  
80    <p>The {@link OpenMode} option on 
81    {@link IndexWriterConfig#setOpenMode(OpenMode)} determines 
82    whether a new index is created, or whether an existing index is
83    opened. Note that you can open an index with {@link OpenMode#CREATE}
84    even while readers are using the index. The old readers will 
85    continue to search the "point in time" snapshot they had opened, 
86    and won't see the newly created index until they re-open. If 
87    {@link OpenMode#CREATE_OR_APPEND} is used IndexWriter will create a 
88    new index if there is not already an index at the provided path
89    and otherwise open the existing index.</p>
90  
91    <p>In either case, documents are added with {@link #addDocument(Iterable)
92    addDocument} and removed with {@link #deleteDocuments(Term...)} or {@link
93    #deleteDocuments(Query...)}. A document can be updated with {@link
94    #updateDocument(Term, Iterable) updateDocument} (which just deletes
95    and then adds the entire document). When finished adding, deleting 
96    and updating documents, {@link #close() close} should be called.</p>
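
  <p>For example, a minimal indexing sketch might look as follows; it assumes
  a {@link Directory} and an {@link Analyzer} have already been created, and
  the field names and values are purely illustrative:</p>

  <pre>
  IndexWriterConfig conf = new IndexWriterConfig(analyzer);
  conf.setOpenMode(OpenMode.CREATE_OR_APPEND);
  IndexWriter writer = new IndexWriter(directory, conf);

  Document doc = new Document();
  doc.add(new StringField("id", "1", Field.Store.YES));
  doc.add(new TextField("body", "some text to index", Field.Store.NO));
  writer.addDocument(doc);                          // buffer a new document
  writer.updateDocument(new Term("id", "1"), doc);  // delete-then-add by id term
  writer.commit();                                  // make the changes visible to readers
  writer.close();
  </pre>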
97  
98    <a name="flush"></a>
99    <p>These changes are buffered in memory and periodically
100   flushed to the {@link Directory} (during the above method
101   calls). A flush is triggered either by RAM usage of the buffered
102   documents (see {@link IndexWriterConfig#setRAMBufferSizeMB}) or by
103   the number of added documents (see
104   {@link IndexWriterConfig#setMaxBufferedDocs(int)}).
105   The default is to flush when RAM usage hits
106   {@link IndexWriterConfig#DEFAULT_RAM_BUFFER_SIZE_MB} MB. For
107   best indexing speed you should flush by RAM usage with a
108   large RAM buffer. Additionally, if IndexWriter reaches the configured number of
109   buffered deletes (see {@link IndexWriterConfig#setMaxBufferedDeleteTerms})
110   the deleted terms and queries are flushed and applied to existing segments.
111   In contrast to the other flush options {@link IndexWriterConfig#setRAMBufferSizeMB} and 
112   {@link IndexWriterConfig#setMaxBufferedDocs(int)}, deleted terms
113   won't trigger a segment flush. Note that flushing just moves the
114   internal buffered state in IndexWriter into the index, but
115   these changes are not visible to IndexReader until either
116   {@link #commit()} or {@link #close} is called.  A flush may
117   also trigger one or more segment merges which by default
118   run with a background thread so as not to block the
119   addDocument calls (see <a href="#mergePolicy">below</a>
120   for changing the {@link MergeScheduler}).</p>
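
  <p>As a sketch only, flushing primarily by RAM usage with a larger buffer
  might be configured like this; the 256 MB value is purely illustrative,
  not a recommendation:</p>

  <pre>
  IndexWriterConfig conf = new IndexWriterConfig(analyzer);
  conf.setRAMBufferSizeMB(256.0);                                // flush once buffered docs use ~256 MB
  conf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); // do not also flush by document count
  IndexWriter writer = new IndexWriter(directory, conf);
  </pre>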
121 
122   <p>Opening an <code>IndexWriter</code> creates a lock file for the directory in use. Trying to open
123   another <code>IndexWriter</code> on the same directory will lead to a
124   {@link LockObtainFailedException}.</p>
125   
126   <a name="deletionPolicy"></a>
127   <p>Expert: <code>IndexWriter</code> allows an optional
128   {@link IndexDeletionPolicy} implementation to be
129   specified.  You can use this to control when prior commits
130   are deleted from the index.  The default policy is {@link
131   KeepOnlyLastCommitDeletionPolicy} which removes all prior
132   commits as soon as a new commit is done (this matches
133   behavior before 2.2).  Creating your own policy can allow
134   you to explicitly keep previous "point in time" commits
135   alive in the index for some time, to allow readers to
136   refresh to the new commit without having the old commit
137   deleted out from under them.  This is necessary on
138   filesystems like NFS that do not support "delete on last
139   close" semantics, which Lucene's "point in time" search
140   normally relies on. </p>
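
  <p>As a sketch of that approach, the default policy can be wrapped in a
  {@link SnapshotDeletionPolicy} so a commit stays alive while its files are
  copied; the backup logic itself is omitted here:</p>

  <pre>
  SnapshotDeletionPolicy sdp =
      new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
  IndexWriterConfig conf = new IndexWriterConfig(analyzer);
  conf.setIndexDeletionPolicy(sdp);
  IndexWriter writer = new IndexWriter(directory, conf);

  IndexCommit snapshot = sdp.snapshot();    // this commit's files won't be deleted...
  try {
    // ... copy the files named by snapshot.getFileNames() somewhere safe ...
  } finally {
    sdp.release(snapshot);                  // ...until the snapshot is released
  }
  </pre>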
141 
142   <a name="mergePolicy"></a> <p>Expert:
143   <code>IndexWriter</code> allows you to separately change
144   the {@link MergePolicy} and the {@link MergeScheduler}.
145   The {@link MergePolicy} is invoked whenever there are
146   changes to the segments in the index.  Its role is to
147   select which merges to do, if any, and return a {@link
148   MergePolicy.MergeSpecification} describing the merges.
149   The default is {@link TieredMergePolicy}.  Then, the {@link
150   MergeScheduler} is invoked with the requested merges and
151   it decides when and how to run the merges.  The default is
152   {@link ConcurrentMergeScheduler}. </p>
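
  <p>A configuration sketch that adjusts both; every value shown here is
  illustrative rather than a recommendation:</p>

  <pre>
  TieredMergePolicy mp = new TieredMergePolicy();
  mp.setMaxMergedSegmentMB(5120.0);          // cap merged segments at ~5 GB
  ConcurrentMergeScheduler ms = new ConcurrentMergeScheduler();
  ms.setMaxMergesAndThreads(6, 2);           // at most 6 pending merges, 2 merge threads

  IndexWriterConfig conf = new IndexWriterConfig(analyzer);
  conf.setMergePolicy(mp);
  conf.setMergeScheduler(ms);
  IndexWriter writer = new IndexWriter(directory, conf);
  </pre>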
153 
154   <a name="OOME"></a><p><b>NOTE</b>: if you hit a
155   VirtualMachineError, or disaster strikes during a checkpoint,
156   then IndexWriter will close itself.  This is a
157   defensive measure in case any internal state (buffered
158   documents, deletions, reference counts) was corrupted.
159   Any subsequent calls will throw an AlreadyClosedException.</p>
160 
161   <a name="thread-safety"></a><p><b>NOTE</b>: {@link
162   IndexWriter} instances are completely thread
163   safe, meaning multiple threads can call any of its
164   methods concurrently.  If your application requires
165   external synchronization, you should <b>not</b>
166   synchronize on the <code>IndexWriter</code> instance as
167   this may cause deadlock; use your own (non-Lucene) objects
168   instead. </p>
169   
170   <p><b>NOTE</b>: If you call
171   <code>Thread.interrupt()</code> on a thread that's within
172   IndexWriter, IndexWriter will try to catch this (eg, if
173   it's in a wait() or Thread.sleep()), and will then throw
174   the unchecked exception {@link ThreadInterruptedException}
175   and <b>clear</b> the interrupt status on the thread.</p>
176 */
177 
178 /*
179  * Clarification: Check Points (and commits)
180  * IndexWriter writes new index files to the directory without writing a new segments_N
181  * file which references these new files. It also means that the state of
182  * the in-memory SegmentInfos object is different from the most recent
183  * segments_N file written to the directory.
184  *
185  * Each time the SegmentInfos is changed, and matches the (possibly
186  * modified) directory files, we have a new "check point".
187  * If the modified/new SegmentInfos is written to disk - as a new
188  * (generation of) segments_N file - this check point is also an
189  * IndexCommit.
190  *
191  * A new checkpoint always replaces the previous checkpoint and
192  * becomes the new "front" of the index. This allows the IndexFileDeleter
193  * to delete files that are referenced only by stale checkpoints
194  * (files that were created since the last commit, but are no longer
195  * referenced by the "front" of the index). For this, IndexFileDeleter
196  * keeps track of the last non-commit checkpoint.
197  */
198 public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
199 
200   /** Hard limit on maximum number of documents that may be added to the
201    *  index.  If you try to add more than this you'll hit {@code IllegalArgumentException}. */
202   // We defensively subtract 128 to be well below the lowest
203   // ArrayUtil.MAX_ARRAY_LENGTH on "typical" JVMs.  We don't just use
204   // ArrayUtil.MAX_ARRAY_LENGTH here because this can vary across JVMs:
205   public static final int MAX_DOCS = Integer.MAX_VALUE - 128;
206 
207   /** Maximum value of the token position in an indexed field. */
208   public static final int MAX_POSITION = Integer.MAX_VALUE - 128;
209 
210   // Use package-private instance var to enforce the limit so testing
211   // can use less electricity:
212   private static int actualMaxDocs = MAX_DOCS;
213 
214   /** Used only for testing. */
215   static void setMaxDocs(int maxDocs) {
216     if (maxDocs > MAX_DOCS) {
217       // Cannot go higher than the hard max:
218       throw new IllegalArgumentException("maxDocs must be <= IndexWriter.MAX_DOCS=" + MAX_DOCS + "; got: " + maxDocs);
219     }
220     IndexWriter.actualMaxDocs = maxDocs;
221   }
222 
223   static int getActualMaxDocs() {
224     return IndexWriter.actualMaxDocs;
225   }
226   
227   /** Used only for testing. */
228   boolean enableTestPoints = false;
229 
230   private static final int UNBOUNDED_MAX_MERGE_SEGMENTS = -1;
231   
232   /**
233    * Name of the write lock in the index.
234    */
235   public static final String WRITE_LOCK_NAME = "write.lock";
236 
237   /** Key for the source of a segment in the {@link SegmentInfo#getDiagnostics() diagnostics}. */
238   public static final String SOURCE = "source";
239   /** Source of a segment which results from a merge of other segments. */
240   public static final String SOURCE_MERGE = "merge";
241   /** Source of a segment which results from a flush. */
242   public static final String SOURCE_FLUSH = "flush";
243   /** Source of a segment which results from a call to {@link #addIndexes(CodecReader...)}. */
244   public static final String SOURCE_ADDINDEXES_READERS = "addIndexes(CodecReader...)";
245 
246   /**
247    * Absolute hard maximum length for a term, in bytes once
248    * encoded as UTF-8.  If a term arrives from the analyzer
249    * longer than this length, an
250    * <code>IllegalArgumentException</code>  is thrown
251    * and a message is printed to infoStream, if set (see {@link
252    * IndexWriterConfig#setInfoStream(InfoStream)}).
253    */
254   public final static int MAX_TERM_LENGTH = DocumentsWriterPerThread.MAX_TERM_LENGTH_UTF8;
255   // when unrecoverable disaster strikes, we populate this with the reason that we had to close IndexWriter
256   volatile Throwable tragedy;
257 
258   private final Directory directoryOrig;       // original user directory
259   private final Directory directory;           // wrapped with additional checks
260   private final Directory mergeDirectory;      // wrapped with throttling: used for merging
261   private final Analyzer analyzer;    // how to analyze text
262 
263   private final AtomicLong changeCount = new AtomicLong(); // increments every time a change is completed
264   private volatile long lastCommitChangeCount; // last changeCount that was committed
265 
266   private List<SegmentCommitInfo> rollbackSegments;      // list of segmentInfo we will fall back to if the commit fails
267 
268   volatile SegmentInfos pendingCommit;            // set when a commit is pending (after prepareCommit() & before commit())
269   volatile long pendingCommitChangeCount;
270 
271   private Collection<String> filesToCommit;
272 
273   final SegmentInfos segmentInfos;       // the segments
274   final FieldNumbers globalFieldNumberMap;
275 
276   private final DocumentsWriter docWriter;
277   private final Queue<Event> eventQueue;
278   final IndexFileDeleter deleter;
279 
280   // used by forceMerge to note those needing merging
281   private Map<SegmentCommitInfo,Boolean> segmentsToMerge = new HashMap<>();
282   private int mergeMaxNumSegments;
283 
284   private Lock writeLock;
285 
286   private volatile boolean closed;
287   private volatile boolean closing;
288 
289   // Holds all SegmentInfo instances currently involved in
290   // merges
291   private HashSet<SegmentCommitInfo> mergingSegments = new HashSet<>();
292 
293   private final MergeScheduler mergeScheduler;
294   private LinkedList<MergePolicy.OneMerge> pendingMerges = new LinkedList<>();
295   private Set<MergePolicy.OneMerge> runningMerges = new HashSet<>();
296   private List<MergePolicy.OneMerge> mergeExceptions = new ArrayList<>();
297   private long mergeGen;
298   private boolean stopMerges;
299   private boolean didMessageState;
300 
301   final AtomicInteger flushCount = new AtomicInteger();
302   final AtomicInteger flushDeletesCount = new AtomicInteger();
303 
304   final ReaderPool readerPool = new ReaderPool();
305   final BufferedUpdatesStream bufferedUpdatesStream;
306 
307   // This is a "write once" variable (like the organic dye
308   // on a DVD-R that may or may not be heated by a laser and
309   // then cooled to permanently record the event): it's
310   // false, until getReader() is called for the first time,
311   // at which point it's switched to true and never changes
312   // back to false.  Once this is true, we hold open and
313   // reuse SegmentReader instances internally for applying
314   // deletes, doing merges, and reopening near real-time
315   // readers.
316   private volatile boolean poolReaders;
317 
318   // The instance that was passed to the constructor. It is saved only in order
319   // to allow users to query an IndexWriter's settings.
320   private final LiveIndexWriterConfig config;
321 
322   /** System.nanoTime() when commit started; used to write
323    *  an infoStream message about how long commit took. */
324   private long startCommitTime;
325 
326   /** How many documents are in the index, or are in the process of being
327    *  added (reserved).  E.g., operations like addIndexes will first reserve
328    *  the right to add N docs, before they actually change the index,
329    *  much like how hotels place an "authorization hold" on your credit
330    *  card to make sure they can later charge you when you check out. */
331   final AtomicLong pendingNumDocs = new AtomicLong();
332 
333   final CloseableThreadLocal<MergeRateLimiter> rateLimiters = new CloseableThreadLocal<>();
334 
335   DirectoryReader getReader() throws IOException {
336     return getReader(true);
337   }
338 
339   /**
340    * Expert: returns a readonly reader, covering all
341    * committed as well as un-committed changes to the index.
342    * This provides "near real-time" searching, in that
343    * changes made during an IndexWriter session can be
344    * quickly made available for searching without closing
345    * the writer nor calling {@link #commit}.
346    *
347    * <p>Note that this is functionally equivalent to calling
348    * {@code flush} and then opening a new reader.  But the turnaround time of this
349    * method should be faster since it avoids the potentially
350    * costly {@link #commit}.</p>
351    *
352    * <p>You must close the {@link IndexReader} returned by
353    * this method once you are done using it.</p>
354    *
355    * <p>It's <i>near</i> real-time because there is no hard
356    * guarantee on how quickly you can get a new reader after
357    * making changes with IndexWriter.  You'll have to
358    * experiment in your situation to determine if it's
359    * fast enough.  As this is a new and experimental
360    * feature, please report back on your findings so we can
361    * learn, improve and iterate.</p>
362    *
363    * <p>The resulting reader supports {@link
364    * DirectoryReader#openIfChanged}, but that call will simply forward
365    * back to this method (though this may change in the
366    * future).</p>
367    *
368    * <p>The very first time this method is called, this
369    * writer instance will make every effort to pool the
370    * readers that it opens for doing merges, applying
371    * deletes, etc.  This means additional resources (RAM,
372    * file descriptors, CPU time) will be consumed.</p>
373    *
374    * <p>For lower latency on reopening a reader, you should
375    * call {@link IndexWriterConfig#setMergedSegmentWarmer} to
376    * pre-warm a newly merged segment before it's committed
377    * to the index.  This is important for minimizing
378    * index-to-search delay after a large merge.  </p>
379    *
380    * <p>If an addIndexes* call is running in another thread,
381    * then this reader will only search those segments from
382    * the foreign index that have been successfully copied
383    * over so far.</p>
384    *
385    * <p><b>NOTE</b>: Once the writer is closed, any
386    * outstanding readers may continue to be used.  However,
387    * if you attempt to reopen any of those readers, you'll
388    * hit an {@link AlreadyClosedException}.</p>
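   *
   * <p>Application code normally obtains such a reader via
   * {@link DirectoryReader#open(IndexWriter,boolean)}; a rough refresh-loop
   * sketch (variable names are illustrative only) is:</p>
   *
   * <pre>
   * DirectoryReader reader = DirectoryReader.open(writer, true);  // NRT reader
   * ...
   * writer.addDocument(doc);                                      // more changes
   * DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
   * if (newReader != null) {
   *   reader.close();
   *   reader = newReader;                                         // sees the buffered changes
   * }
   * </pre>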
389    *
390    * @lucene.experimental
391    *
392    * @return IndexReader that covers entire index plus all
393    * changes made so far by this IndexWriter instance
394    *
395    * @throws IOException If there is a low-level I/O error
396    */
397   DirectoryReader getReader(boolean applyAllDeletes) throws IOException {
398     ensureOpen();
399 
400     final long tStart = System.currentTimeMillis();
401 
402     if (infoStream.isEnabled("IW")) {
403       infoStream.message("IW", "flush at getReader");
404     }
405     // Do this up front before flushing so that the readers
406     // obtained during this flush are pooled, the first time
407     // this method is called:
408     poolReaders = true;
409     DirectoryReader r = null;
410     doBeforeFlush();
411     boolean anyChanges = false;
412     /*
413      * for releasing an NRT reader we must ensure that
414      * DW doesn't add any segments or deletes until we are
415      * done with creating the NRT DirectoryReader. 
416      * We release the two-stage full flush after we are done opening the
417      * directory reader!
418      */
419     boolean success2 = false;
420     try {
421       boolean success = false;
422       synchronized (fullFlushLock) {
423         try {
424           anyChanges = docWriter.flushAllThreads();
425           if (!anyChanges) {
426             // prevent double increment since docWriter#doFlush increments the flushcount
427             // if we flushed anything.
428             flushCount.incrementAndGet();
429           }
430           // Prevent segmentInfos from changing while opening the
431           // reader; in theory we could instead do similar retry logic,
432           // just like we do when loading segments_N
433           synchronized(this) {
434             anyChanges |= maybeApplyDeletes(applyAllDeletes);
435             r = StandardDirectoryReader.open(this, segmentInfos, applyAllDeletes);
436             if (infoStream.isEnabled("IW")) {
437               infoStream.message("IW", "return reader version=" + r.getVersion() + " reader=" + r);
438             }
439           }
440           success = true;
441         } finally {
442           // Done: finish the full flush!
443           docWriter.finishFullFlush(this, success);
444           if (success) {
445             processEvents(false, true);
446             doAfterFlush();
447           } else {
448             if (infoStream.isEnabled("IW")) {
449               infoStream.message("IW", "hit exception during NRT reader");
450             }
451           }
452         }
453       }
454       if (anyChanges) {
455         maybeMerge(config.getMergePolicy(), MergeTrigger.FULL_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
456       }
457       if (infoStream.isEnabled("IW")) {
458         infoStream.message("IW", "getReader took " + (System.currentTimeMillis() - tStart) + " msec");
459       }
460       success2 = true;
461     } catch (AbortingException | VirtualMachineError tragedy) {
462       tragicEvent(tragedy, "getReader");
463       // never reached but javac disagrees:
464       return null;
465     } finally {
466       if (!success2) {
467         IOUtils.closeWhileHandlingException(r);
468       }
469     }
470     return r;
471   }
472 
473   @Override
474   public final long ramBytesUsed() {
475     ensureOpen();
476     return docWriter.ramBytesUsed();
477   }
478   
479   @Override
480   public Collection<Accountable> getChildResources() {
481     return Collections.emptyList();
482   }
483 
484   /** Holds shared SegmentReader instances. IndexWriter uses
485    *  SegmentReaders for 1) applying deletes, 2) doing
486    *  merges, 3) handing out a real-time reader.  This pool
487    *  reuses instances of the SegmentReaders in all these
488    *  places if it is in "near real-time mode" (getReader()
489    *  has been called on this instance). */
490 
491   class ReaderPool implements Closeable {
492     
493     private final Map<SegmentCommitInfo,ReadersAndUpdates> readerMap = new HashMap<>();
494 
495     // used only by asserts
496     public synchronized boolean infoIsLive(SegmentCommitInfo info) {
497       int idx = segmentInfos.indexOf(info);
498       assert idx != -1: "info=" + info + " isn't live";
499       assert segmentInfos.info(idx) == info: "info=" + info + " doesn't match live info in segmentInfos";
500       return true;
501     }
502 
503     public synchronized void drop(SegmentCommitInfo info) throws IOException {
504       final ReadersAndUpdates rld = readerMap.get(info);
505       if (rld != null) {
506         assert info == rld.info;
507 //        System.out.println("[" + Thread.currentThread().getName() + "] ReaderPool.drop: " + info);
508         readerMap.remove(info);
509         rld.dropReaders();
510       }
511     }
512 
513     public synchronized boolean anyPendingDeletes() {
514       for(ReadersAndUpdates rld : readerMap.values()) {
515         if (rld.getPendingDeleteCount() != 0) {
516           return true;
517         }
518       }
519 
520       return false;
521     }
522 
523     public synchronized void release(ReadersAndUpdates rld) throws IOException {
524       release(rld, true);
525     }
526 
527     public synchronized void release(ReadersAndUpdates rld, boolean assertInfoLive) throws IOException {
528 
529       // Matches incRef in get:
530       rld.decRef();
531 
532       // Pool still holds a ref:
533       assert rld.refCount() >= 1;
534 
535       if (!poolReaders && rld.refCount() == 1) {
536         // This is the last ref to this RLD, and we're not
537         // pooling, so remove it:
538 //        System.out.println("[" + Thread.currentThread().getName() + "] ReaderPool.release: " + rld.info);
539         if (rld.writeLiveDocs(directory)) {
540           // Make sure we only write del docs for a live segment:
541           assert assertInfoLive == false || infoIsLive(rld.info);
542           // Must checkpoint because we just
543           // created new _X_N.del and field updates files;
544           // don't call IW.checkpoint because that also
545           // increments SIS.version, which we do not want to
546           // do here: it was done previously (after we
547           // invoked BDS.applyDeletes), whereas here all we
548           // did was move the state to disk:
549           checkpointNoSIS();
550         }
551         //System.out.println("IW: done writeLiveDocs for info=" + rld.info);
552 
553 //        System.out.println("[" + Thread.currentThread().getName() + "] ReaderPool.release: drop readers " + rld.info);
554         rld.dropReaders();
555         readerMap.remove(rld.info);
556       }
557     }
558     
559     @Override
560     public void close() throws IOException {
561       dropAll(false);
562     }
563 
564     /** Remove all our references to readers, and commit
565      *  any pending changes. */
566     synchronized void dropAll(boolean doSave) throws IOException {
567       Throwable priorE = null;
568       final Iterator<Map.Entry<SegmentCommitInfo,ReadersAndUpdates>> it = readerMap.entrySet().iterator();
569       while(it.hasNext()) {
570         final ReadersAndUpdates rld = it.next().getValue();
571 
572         try {
573           if (doSave && rld.writeLiveDocs(directory)) {
574             // Make sure we only write del docs and field updates for a live segment:
575             assert infoIsLive(rld.info);
576             // Must checkpoint because we just
577             // created new _X_N.del and field updates files;
578             // don't call IW.checkpoint because that also
579             // increments SIS.version, which we do not want to
580             // do here: it was done previously (after we
581             // invoked BDS.applyDeletes), whereas here all we
582             // did was move the state to disk:
583             checkpointNoSIS();
584           }
585         } catch (Throwable t) {
586           if (doSave) {
587             IOUtils.reThrow(t);
588           } else if (priorE == null) {
589             priorE = t;
590           }
591         }
592 
593         // Important to remove as-we-go, not with .clear()
594         // in the end, in case we hit an exception;
595         // otherwise we could over-decref if close() is
596         // called again:
597         it.remove();
598 
599         // NOTE: it is allowed that these decRefs do not
600         // actually close the SRs; this happens when a
601         // near real-time reader is kept open after the
602         // IndexWriter instance is closed:
603         try {
604           rld.dropReaders();
605         } catch (Throwable t) {
606           if (doSave) {
607             IOUtils.reThrow(t);
608           } else if (priorE == null) {
609             priorE = t;
610           }
611         }
612       }
613       assert readerMap.size() == 0;
614       IOUtils.reThrow(priorE);
615     }
616 
617     /**
618      * Commit live docs changes for the segment readers for
619      * the provided infos.
620      *
621      * @throws IOException If there is a low-level I/O error
622      */
623     public synchronized void commit(SegmentInfos infos) throws IOException {
624       for (SegmentCommitInfo info : infos) {
625         final ReadersAndUpdates rld = readerMap.get(info);
626         if (rld != null) {
627           assert rld.info == info;
628           if (rld.writeLiveDocs(directory)) {
629             // Make sure we only write del docs for a live segment:
630             assert infoIsLive(info);
631 
632             // Must checkpoint because we just
633             // created new _X_N.del and field updates files;
634             // don't call IW.checkpoint because that also
635             // increments SIS.version, which we do not want to
636             // do here: it was done previously (after we
637             // invoked BDS.applyDeletes), whereas here all we
638             // did was move the state to disk:
639             checkpointNoSIS();
640           }
641         }
642       }
643     }
644 
645     /**
646      * Obtain a ReadersAndUpdates instance from the
647      * readerPool.  If create is true, you must later call
648      * {@link #release(ReadersAndUpdates)}.
649      */
650     public synchronized ReadersAndUpdates get(SegmentCommitInfo info, boolean create) {
651 
652       // Make sure no new readers can be opened if another thread just closed us:
653       ensureOpen(false);
654 
655       assert info.info.dir == directoryOrig: "info.dir=" + info.info.dir + " vs " + directoryOrig;
656 
657       ReadersAndUpdates rld = readerMap.get(info);
658       if (rld == null) {
659         if (!create) {
660           return null;
661         }
662         rld = new ReadersAndUpdates(IndexWriter.this, info);
663         // Steal initial reference:
664         readerMap.put(info, rld);
665       } else {
666         assert rld.info == info: "rld.info=" + rld.info + " info=" + info + " isLive?=" + infoIsLive(rld.info) + " vs " + infoIsLive(info);
667       }
668 
669       if (create) {
670         // Return ref to caller:
671         rld.incRef();
672       }
673 
674       assert noDups();
675 
676       return rld;
677     }
678 
679     // Make sure that every segment appears only once in the
680     // pool:
681     private boolean noDups() {
682       Set<String> seen = new HashSet<>();
683       for(SegmentCommitInfo info : readerMap.keySet()) {
684         assert !seen.contains(info.info.name);
685         seen.add(info.info.name);
686       }
687       return true;
688     }
689   }
690 
691   /**
692    * Obtain the number of deleted docs for a pooled reader.
693    * If the reader isn't being pooled, the segmentInfo's 
694    * delCount is returned.
695    */
696   public int numDeletedDocs(SegmentCommitInfo info) {
697     ensureOpen(false);
698     int delCount = info.getDelCount();
699 
700     final ReadersAndUpdates rld = readerPool.get(info, false);
701     if (rld != null) {
702       delCount += rld.getPendingDeleteCount();
703     }
704     return delCount;
705   }
706 
707   /**
708    * Used internally to throw an {@link AlreadyClosedException} if this
709    * IndexWriter has been closed or is in the process of closing.
710    * 
711    * @param failIfClosing
712    *          if true, also fail when {@code IndexWriter} is in the process of
713    *          closing ({@code closing=true}) but not yet done closing (
714    *          {@code closed=false})
715    * @throws AlreadyClosedException
716    *           if this IndexWriter is closed or in the process of closing
717    */
718   protected final void ensureOpen(boolean failIfClosing) throws AlreadyClosedException {
719     if (closed || (failIfClosing && closing)) {
720       throw new AlreadyClosedException("this IndexWriter is closed", tragedy);
721     }
722   }
723 
724   /**
725    * Used internally to throw an {@link
726    * AlreadyClosedException} if this IndexWriter has been
727    * closed ({@code closed=true}) or is in the process of
728    * closing ({@code closing=true}).
729    * <p>
730    * Calls {@link #ensureOpen(boolean) ensureOpen(true)}.
731    * @throws AlreadyClosedException if this IndexWriter is closed
732    */
733   protected final void ensureOpen() throws AlreadyClosedException {
734     ensureOpen(true);
735   }
736 
737   final Codec codec; // for writing new segments
738 
739   /**
740    * Constructs a new IndexWriter per the settings given in <code>conf</code>.
741    * If you want to make "live" changes to this writer instance, use
742    * {@link #getConfig()}.
743    * 
744    * <p>
745    * <b>NOTE:</b> after this writer is created, the given configuration instance
746    * cannot be passed to another writer. If you intend to do so, you should
747    * {@link IndexWriterConfig#clone() clone} it beforehand.
748    * 
749    * @param d
750    *          the index directory. The index is either created or appended
751    *          to, according to <code>conf.getOpenMode()</code>.
752    * @param conf
753    *          the configuration settings according to which IndexWriter should
754    *          be initialized.
755    * @throws IOException
756    *           if the directory cannot be read/written to, or if it does not
757    *           exist and <code>conf.getOpenMode()</code> is
758    *           <code>OpenMode.APPEND</code> or if there is any other low-level
759    *           IO error
760    */
761   public IndexWriter(Directory d, IndexWriterConfig conf) throws IOException {
762     conf.setIndexWriter(this); // prevent reuse by other instances
763     config = conf;
764     infoStream = config.getInfoStream();
765 
766     // obtain the write.lock. If the user configured a timeout,
767     // we wrap with a sleeper and this might take some time.
768     long timeout = config.getWriteLockTimeout();
769     final Directory lockDir;
770     if (timeout == 0) {
771       // user doesn't want sleep/retries
772       lockDir = d;
773     } else {
774       lockDir = new SleepingLockWrapper(d, timeout);
775     }
776     writeLock = lockDir.obtainLock(WRITE_LOCK_NAME);
777     
778     boolean success = false;
779     try {
780       directoryOrig = d;
781       directory = new LockValidatingDirectoryWrapper(d, writeLock);
782 
783       // Directory we use for merging, so we can abort running merges, and so
784       // merge schedulers can optionally rate-limit per-merge IO:
785       mergeDirectory = addMergeRateLimiters(directory);
786 
787       analyzer = config.getAnalyzer();
788       mergeScheduler = config.getMergeScheduler();
789       mergeScheduler.setInfoStream(infoStream);
790       codec = config.getCodec();
791 
792       bufferedUpdatesStream = new BufferedUpdatesStream(infoStream);
793       poolReaders = config.getReaderPooling();
794 
795       OpenMode mode = config.getOpenMode();
796       boolean create;
797       if (mode == OpenMode.CREATE) {
798         create = true;
799       } else if (mode == OpenMode.APPEND) {
800         create = false;
801       } else {
802         // CREATE_OR_APPEND - create only if an index does not exist
803         create = !DirectoryReader.indexExists(directory);
804       }
805 
806       // If index is too old, reading the segments will throw
807       // IndexFormatTooOldException.
808 
809       boolean initialIndexExists = true;
810 
811       String[] files = directory.listAll();
812 
813       // Set up our initial SegmentInfos:
814       IndexCommit commit = config.getIndexCommit();
815 
816       // If the IndexCommit was created from an already-open reader, grab that reader so we can initialize from it:
817       StandardDirectoryReader reader;
818       if (commit == null) {
819         reader = null;
820       } else {
821         reader = commit.getReader();
822       }
823 
824       if (create) {
825 
826         if (config.getIndexCommit() != null) {
827           // We cannot both open from a commit point and create:
828           if (mode == OpenMode.CREATE) {
829             throw new IllegalArgumentException("cannot use IndexWriterConfig.setIndexCommit() with OpenMode.CREATE");
830           } else {
831             throw new IllegalArgumentException("cannot use IndexWriterConfig.setIndexCommit() when index has no commit");
832           }
833         }
834 
835         // Try to read first.  This is to allow create
836         // against an index that's currently open for
837         // searching.  In this case we write the next
838         // segments_N file with no segments:
839         SegmentInfos sis = null;
840         try {
841           sis = SegmentInfos.readLatestCommit(directory);
842           sis.clear();
843         } catch (IOException e) {
844           // Likely this means it's a fresh directory
845           initialIndexExists = false;
846           sis = new SegmentInfos();
847         }
848         
849         segmentInfos = sis;
850 
851         rollbackSegments = segmentInfos.createBackupSegmentInfos();
852 
853         // Record that we have a change (zero out all
854         // segments) pending:
855         changed();
856 
857       } else if (reader != null) {
858         // Init from an existing already opened NRT or non-NRT reader:
859       
860         if (reader.directory() != commit.getDirectory()) {
861           throw new IllegalArgumentException("IndexCommit's reader must have the same directory as the IndexCommit");
862         }
863 
864         if (reader.directory() != directoryOrig) {
865           throw new IllegalArgumentException("IndexCommit's reader must have the same directory passed to IndexWriter");
866         }
867 
868         if (reader.segmentInfos.getLastGeneration() == 0) {  
869           // TODO: maybe we could allow this?  It's tricky...
870           throw new IllegalArgumentException("index must already have an initial commit to open from reader");
871         }
872 
873         // Must clone because we don't want the incoming NRT reader to "see" any changes this writer now makes:
874         segmentInfos = reader.segmentInfos.clone();
875 
876         SegmentInfos lastCommit;
877         try {
878           lastCommit = SegmentInfos.readCommit(directoryOrig, segmentInfos.getSegmentsFileName());
879         } catch (IOException ioe) {
880           throw new IllegalArgumentException("the provided reader is stale: its prior commit file \"" + segmentInfos.getSegmentsFileName() + "\" is missing from index");
881         }
882 
883         if (reader.writer != null) {
884 
885           // The old writer better be closed (we have the write lock now!):
886           assert reader.writer.closed;
887 
888           // In case the old writer wrote further segments (which we are now dropping),
889           // update SIS metadata so we remain write-once:
890           segmentInfos.updateGenerationVersionAndCounter(reader.writer.segmentInfos);
891           lastCommit.updateGenerationVersionAndCounter(reader.writer.segmentInfos);
892         }
893 
894         rollbackSegments = lastCommit.createBackupSegmentInfos();
895 
896         if (infoStream.isEnabled("IW")) {
897           infoStream.message("IW", "init from reader " + reader);
898           messageState();
899         }
900       } else {
901         // Init from either the latest commit point, or an explicit prior commit point:
902 
903         String lastSegmentsFile = SegmentInfos.getLastCommitSegmentsFileName(files);
904         if (lastSegmentsFile == null) {
905           throw new IndexNotFoundException("no segments* file found in " + directory + ": files: " + Arrays.toString(files));
906         }
907 
908         // Do not use SegmentInfos.read(Directory) since the spooky
909         // retrying it does is not necessary here (we hold the write lock):
910         segmentInfos = SegmentInfos.readCommit(directoryOrig, lastSegmentsFile);
911 
912         if (commit != null) {
913           // Swap out all segments, but, keep metadata in
914           // SegmentInfos, like version & generation, to
915           // preserve write-once.  This is important if
916           // readers are open against the future commit
917           // points.
918           if (commit.getDirectory() != directoryOrig) {
919             throw new IllegalArgumentException("IndexCommit's directory doesn't match my directory, expected=" + directoryOrig + ", got=" + commit.getDirectory());
920           }
921           
922           SegmentInfos oldInfos = SegmentInfos.readCommit(directoryOrig, commit.getSegmentsFileName());
923           segmentInfos.replace(oldInfos);
924           changed();
925 
926           if (infoStream.isEnabled("IW")) {
927             infoStream.message("IW", "init: loaded commit \"" + commit.getSegmentsFileName() + "\"");
928           }
929         }
930 
931         rollbackSegments = segmentInfos.createBackupSegmentInfos();
932       }
933 
934       pendingNumDocs.set(segmentInfos.totalMaxDoc());
935 
936       // start with previous field numbers, but new FieldInfos
937       // NOTE: this is correct even for an NRT reader because we'll pull FieldInfos even for the un-committed segments:
938       globalFieldNumberMap = getFieldNumberMap();
939 
940       config.getFlushPolicy().init(config);
941       docWriter = new DocumentsWriter(this, config, directoryOrig, directory);
942       eventQueue = docWriter.eventQueue();
943 
944       // Default deleter (for backwards compatibility) is
945       // KeepOnlyLastCommitDeletionPolicy:
946 
947       // Sync'd is silly here, but IFD asserts we sync'd on the IW instance:
948       synchronized(this) {
949         deleter = new IndexFileDeleter(files, directoryOrig, directory,
950                                        config.getIndexDeletionPolicy(),
951                                        segmentInfos, infoStream, this,
952                                        initialIndexExists, reader != null);
953 
954         // We incRef all files when we return an NRT reader from IW, so all files must exist even in the NRT case:
955         assert create || filesExist(segmentInfos);
956       }
957 
958       if (deleter.startingCommitDeleted) {
959         // Deletion policy deleted the "head" commit point.
960         // We have to mark ourself as changed so that if we
961         // are closed w/o any further changes we write a new
962         // segments_N file.
963         changed();
964       }
965 
966       if (reader != null) {
967         // Pre-enroll all segment readers into the reader pool; this is necessary so
968         // any in-memory NRT live docs are correctly carried over, and so NRT readers
969         // pulled from this IW share the same segment reader:
970         List<LeafReaderContext> leaves = reader.leaves();
971         assert segmentInfos.size() == leaves.size();
972 
973         for (int i=0;i<leaves.size();i++) {
974           LeafReaderContext leaf = leaves.get(i);
975           SegmentReader segReader = (SegmentReader) leaf.reader();
976           SegmentReader newReader = new SegmentReader(segmentInfos.info(i), segReader, segReader.getLiveDocs(), segReader.numDocs());
977           readerPool.readerMap.put(newReader.getSegmentInfo(), new ReadersAndUpdates(this, newReader));
978         }
979 
980         // We always assume we are carrying over incoming changes when opening from reader:
981         segmentInfos.changed();
982         changed();
983       }
984 
985       if (infoStream.isEnabled("IW")) {
986         infoStream.message("IW", "init: create=" + create);
987         messageState();
988       }
989 
990       success = true;
991 
992     } finally {
993       if (!success) {
994         if (infoStream.isEnabled("IW")) {
995           infoStream.message("IW", "init: hit exception on init; releasing write lock");
996         }
997         IOUtils.closeWhileHandlingException(writeLock);
998         writeLock = null;
999       }
1000     }
1001   }
1002 
1003   // reads latest field infos for the commit
1004   // this is used on IW init and addIndexes(Dir) to create/update the global field map.
1005   // TODO: fix tests abusing this method!
1006   static FieldInfos readFieldInfos(SegmentCommitInfo si) throws IOException {
1007     Codec codec = si.info.getCodec();
1008     FieldInfosFormat reader = codec.fieldInfosFormat();
1009     
1010     if (si.hasFieldUpdates()) {
1011       // there are updates, we read latest (always outside of CFS)
1012       final String segmentSuffix = Long.toString(si.getFieldInfosGen(), Character.MAX_RADIX);
1013       return reader.read(si.info.dir, si.info, segmentSuffix, IOContext.READONCE);
1014     } else if (si.info.getUseCompoundFile()) {
1015       // cfs
1016       try (Directory cfs = codec.compoundFormat().getCompoundReader(si.info.dir, si.info, IOContext.DEFAULT)) {
1017         return reader.read(cfs, si.info, "", IOContext.READONCE);
1018       }
1019     } else {
1020       // no cfs
1021       return reader.read(si.info.dir, si.info, "", IOContext.READONCE);
1022     }
1023   }
1024 
1025   /**
1026    * Loads or returns the already loaded global field number map for this {@link SegmentInfos}.
1027    * If this {@link SegmentInfos} has no global field number map, the returned instance is empty.
1028    */
1029   private FieldNumbers getFieldNumberMap() throws IOException {
1030     final FieldNumbers map = new FieldNumbers();
1031 
1032     for(SegmentCommitInfo info : segmentInfos) {
1033       FieldInfos fis = readFieldInfos(info);
1034       for(FieldInfo fi : fis) {
1035         map.addOrGet(fi.name, fi.number, fi.getDocValuesType());
1036       }
1037     }
1038 
1039     return map;
1040   }
1041   
1042   /**
1043    * Returns a {@link LiveIndexWriterConfig}, which can be used to query the IndexWriter's
1044    * current settings, as well as modify "live" ones.
1045    */
1046   public LiveIndexWriterConfig getConfig() {
1047     ensureOpen(false);
1048     return config;
1049   }
1050 
1051   private void messageState() {
1052     if (infoStream.isEnabled("IW") && didMessageState == false) {
1053       didMessageState = true;
1054       infoStream.message("IW", "\ndir=" + directoryOrig + "\n" +
1055             "index=" + segString() + "\n" +
1056             "version=" + Version.LATEST.toString() + "\n" +
1057             config.toString());
1058       infoStream.message("IW", "MMapDirectory.UNMAP_SUPPORTED=" + MMapDirectory.UNMAP_SUPPORTED);
1059     }
1060   }
1061 
1062   /**
1063    * Gracefully closes (commits, waits for merges), but calls rollback
1064    * if there's an exception, so the IndexWriter is always closed.  This is called
1065    * from {@link #close} when {@link IndexWriterConfig#commitOnClose} is
1066    * {@code true}.
1067    */
1068   private void shutdown() throws IOException {
1069     if (pendingCommit != null) {
1070       throw new IllegalStateException("cannot close: prepareCommit was already called with no corresponding call to commit");
1071     }
1072     // Ensure that only one thread actually gets to do the
1073     // closing
1074     if (shouldClose(true)) {
1075       boolean success = false;
1076       try {
1077         if (infoStream.isEnabled("IW")) {
1078           infoStream.message("IW", "now flush at close");
1079         }
1080         flush(true, true);
1081         waitForMerges();
1082         commitInternal(config.getMergePolicy());
1083         rollbackInternal(); // ie close, since we just committed
1084         success = true;
1085       } finally {
1086         if (success == false) {
1087           // Be certain to close the index on any exception
1088           try {
1089             rollbackInternal();
1090           } catch (Throwable t) {
1091             // Suppress so we keep throwing original exception
1092           }
1093         }
1094       }
1095     }
1096   }
1097 
1098   /**
1099    * Closes all open resources and releases the write lock.
1100    *
1101    * If {@link IndexWriterConfig#commitOnClose} is <code>true</code>,
1102    * this will attempt to gracefully shut down by writing any
1103    * changes, waiting for any running merges, committing, and closing.
1104    * In this case, note that:
1105    * <ul>
1106    *   <li>If you called prepareCommit but failed to call commit, this
1107    *       method will throw {@code IllegalStateException} and the {@code IndexWriter}
1108    *       will not be closed.</li>
1109    *   <li>If this method throws any other exception, the {@code IndexWriter}
1110    *       will be closed, but changes may have been lost.</li>
1111    * </ul>
1112    *
1113    * <p>
1114    * Note that this may be a costly
1115    * operation, so try to re-use a single writer instead of
1116    * closing and opening a new one.  See {@link #commit()} for
1117    * caveats about write caching done by some IO devices.
1118    *
1119    * <p><b>NOTE</b>: You must ensure no other threads are still making
1120    * changes at the same time that this method is invoked.</p>
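   *
   * <p>Since {@code IndexWriter} implements {@link Closeable}, one simple way
   * to guarantee the writer gets closed is a try-with-resources block (a sketch
   * only; {@code directory} and {@code conf} are assumed to exist):</p>
   *
   * <pre>
   * try (IndexWriter writer = new IndexWriter(directory, conf)) {
   *   writer.addDocument(doc);
   * } // close() runs here, committing first if commitOnClose is true
   * </pre>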
1121    */
1122   @Override
1123   public void close() throws IOException {
1124     if (config.getCommitOnClose()) {
1125       shutdown();
1126     } else {
1127       rollback();
1128     }
1129   }
1130 
1131   // Returns true if this thread should attempt to close, or
1132   // false if IndexWriter is now closed; else,
1133   // waits until another thread finishes closing
1134   synchronized private boolean shouldClose(boolean waitForClose) {
1135     while (true) {
1136       if (closed == false) {
1137         if (closing == false) {
1138           // We get to close
1139           closing = true;
1140           return true;
1141         } else if (waitForClose == false) {
1142           return false;
1143         } else {
1144           // Another thread is presently trying to close;
1145           // wait until it finishes one way (closes
1146           // successfully) or another (fails to close)
1147           doWait();
1148         }
1149       } else {
1150         return false;
1151       }
1152     }
1153   }
1154 
1155   /** Returns the Directory used by this index. */
1156   public Directory getDirectory() {
1157     // return the original directory the user supplied, unwrapped.
1158     return directoryOrig;
1159   }
1160 
1161   /** Returns the analyzer used by this index. */
1162   public Analyzer getAnalyzer() {
1163     ensureOpen();
1164     return analyzer;
1165   }
1166 
1167   /** Returns total number of docs in this index, including
1168    *  docs not yet flushed (still in the RAM buffer),
1169    *  not counting deletions (deleted docs are still included in this count).
1170    *  @see #numDocs */
1171   public synchronized int maxDoc() {
1172     ensureOpen();
1173     return docWriter.getNumDocs() + segmentInfos.totalMaxDoc();
1174   }
1175 
1176   /** Returns total number of docs in this index, including
1177    *  docs not yet flushed (still in the RAM buffer), and
1178    *  taking deletions into account (deleted docs are not counted).  <b>NOTE:</b> buffered deletions
1179    *  are not counted.  If you really need these to be
1180    *  counted you should call {@link #commit()} first.
1181    *  @see #numDocs */
1182   public synchronized int numDocs() {
1183     ensureOpen();
1184     int count = docWriter.getNumDocs();
1185     for (final SegmentCommitInfo info : segmentInfos) {
1186       count += info.info.maxDoc() - numDeletedDocs(info);
1187     }
1188     return count;
1189   }
1190 
1191   /**
1192    * Returns true if this index has deletions (including
1193    * buffered deletions).  Note that this will return true
1194    * if there are buffered Term/Query deletions, even if it
1195    * turns out those buffered deletions don't match any
1196    * documents.
1197    */
1198   public synchronized boolean hasDeletions() {
1199     ensureOpen();
1200     if (bufferedUpdatesStream.any()) {
1201       return true;
1202     }
1203     if (docWriter.anyDeletions()) {
1204       return true;
1205     }
1206     if (readerPool.anyPendingDeletes()) {
1207       return true;
1208     }
1209     for (final SegmentCommitInfo info : segmentInfos) {
1210       if (info.hasDeletions()) {
1211         return true;
1212       }
1213     }
1214     return false;
1215   }
1216 
1217   /**
1218    * Adds a document to this index.
1219    *
1220    * <p> Note that if an Exception is hit (for example disk full)
1221    * then the index will be consistent, but this document
1222    * may not have been added.  Furthermore, it's possible
1223    * the index will have one segment in non-compound format
1224    * even when using compound files (when a merge has
1225    * partially succeeded).</p>
1226    *
1227    * <p> This method periodically flushes pending documents
1228    * to the Directory (see <a href="#flush">above</a>), and
1229    * also periodically triggers segment merges in the index
1230    * according to the {@link MergePolicy} in use.</p>
1231    *
1232    * <p>Merges temporarily consume space in the
1233    * directory. The amount of space required is up to 1X the
1234    * size of all segments being merged, when no
1235    * readers/searchers are open against the index, and up to
1236    * 2X the size of all segments being merged when
1237    * readers/searchers are open against the index (see
1238    * {@link #forceMerge(int)} for details). The sequence of
1239    * primitive merge operations performed is governed by the
1240    * merge policy.
1241    *
1242    * <p>Note that each term in the document can be no longer
1243    * than {@link #MAX_TERM_LENGTH} in bytes, otherwise an
1244    * IllegalArgumentException will be thrown.</p>
1245    *
1246    * <p>Note that it's possible to create an invalid Unicode
1247    * string in Java if a UTF-16 surrogate pair is malformed.
1248    * In this case, the invalid characters are silently
1249    * replaced with the Unicode replacement character
1250    * U+FFFD.</p>
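   *
   * <p>A short sketch of the call itself; the field names and values are
   * illustrative only:</p>
   *
   * <pre>
   * Document doc = new Document();
   * doc.add(new TextField("title", "an example title", Field.Store.YES));
   * doc.add(new TextField("body", "a longer example body", Field.Store.NO));
   * writer.addDocument(doc);
   * </pre>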
1251    *
1252    * @throws CorruptIndexException if the index is corrupt
1253    * @throws IOException if there is a low-level IO error
1254    */
1255   public void addDocument(Iterable<? extends IndexableField> doc) throws IOException {
1256     updateDocument(null, doc);
1257   }
1258 
1259   /**
1260    * Atomically adds a block of documents with sequentially
1261    * assigned document IDs, such that an external reader
1262    * will see all or none of the documents.
1263    *
1264    * <p><b>WARNING</b>: the index does not currently record
1265    * which documents were added as a block.  Today this is
1266    * fine, because merging will preserve a block. The order of
1267    * documents within a segment will be preserved, even when child
1268    * documents within a block are deleted. Most search features
1269    * (like result grouping and block joining) require you to
1270    * mark documents; when these documents are deleted, these
1271    * search features will not work as expected. Obviously adding
1272    * documents to an existing block will require you to reindex
1273    * the entire block.
1274    *
1275    * <p>However it's possible that in the future Lucene may, while
1276    * merging, more aggressively re-order documents (for example,
1277    * perhaps to obtain better index compression), in which case
1278    * you may need to fully re-index your documents at that time.
1279    *
1280    * <p>See {@link #addDocument(Iterable)} for details on
1281    * index and IndexWriter state after an Exception, and
1282    * flushing/merging temporary free space requirements.</p>
1283    *
1284    * <p><b>NOTE</b>: tools that do offline splitting of an index
1285    * (for example, IndexSplitter in contrib) or
1286    * re-sorting of documents (for example, IndexSorter in
1287    * contrib) are not aware of these atomically added documents
1288    * and will likely break them up.  Use such tools at your
1289    * own risk!
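   *
   * <p>A sketch of indexing one parent document together with its child
   * documents as a single block; the parent is added last, which is the
   * convention block-join search relies on (field names are illustrative):</p>
   *
   * <pre>
   * List&lt;Document&gt; block = new ArrayList&lt;&gt;();
   * for (String skill : skills) {
   *   Document child = new Document();
   *   child.add(new StringField("skill", skill, Field.Store.YES));
   *   block.add(child);
   * }
   * Document parent = new Document();
   * parent.add(new StringField("docType", "resume", Field.Store.NO));
   * block.add(parent);               // the parent document comes last in the block
   * writer.addDocuments(block);
   * </pre>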
1290    *
1291    * @throws CorruptIndexException if the index is corrupt
1292    * @throws IOException if there is a low-level IO error
1293    *
1294    * @lucene.experimental
1295    */
1296   public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
1297     updateDocuments(null, docs);
1298   }
1299 
1300   /**
1301    * Atomically deletes documents matching the provided
1302    * delTerm and adds a block of documents with sequentially
1303    * assigned document IDs, such that an external reader
1304    * will see all or none of the documents. 
1305    *
1306    * See {@link #addDocuments(Iterable)}.
1307    *
1308    * @throws CorruptIndexException if the index is corrupt
1309    * @throws IOException if there is a low-level IO error
1310    *
1311    * @lucene.experimental
1312    */
1313   public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
1314     ensureOpen();
1315     try {
1316       boolean success = false;
1317       try {
1318         if (docWriter.updateDocuments(docs, analyzer, delTerm)) {
1319           processEvents(true, false);
1320         }
1321         success = true;
1322       } finally {
1323         if (!success) {
1324           if (infoStream.isEnabled("IW")) {
1325             infoStream.message("IW", "hit exception updating document");
1326           }
1327         }
1328       }
1329     } catch (AbortingException | VirtualMachineError tragedy) {
1330       tragicEvent(tragedy, "updateDocuments");
1331     }
1332   }
1333 
1334   /** Expert: attempts to delete by document ID, as long as
1335    *  the provided reader is a near-real-time reader (from {@link
1336    *  DirectoryReader#open(IndexWriter,boolean)}).  If the
1337    *  provided reader is an NRT reader obtained from this
1338    *  writer, and its segment has not been merged away, then
1339    *  the delete succeeds and this method returns true; else, it
1340    *  returns false and the caller must then separately delete by
1341    *  Term or Query.
1342    *
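        *  <p>A hedged sketch (assumes <code>writer</code> is this IndexWriter and
        *  the docID was obtained from a search against an NRT reader opened from it;
        *  the fall-back term is illustrative):
        *
        *  <pre class="prettyprint">
        *  DirectoryReader reader = DirectoryReader.open(writer, true);
        *  // ... search against 'reader', obtain a matching docID ...
        *  if (writer.tryDeleteDocument(reader, docID) == false) {
        *    // segment was merged away; fall back to delete-by-term
        *    writer.deleteDocuments(new Term("id", "1"));
        *  }
        *  </pre>
        *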
1343    *  <b>NOTE</b>: this method can only delete documents
1344    *  visible to the currently open NRT reader.  If you need
1345    *  to delete documents indexed after opening the NRT
1346    *  reader you must use {@link #deleteDocuments(Term...)}. */
1347   public synchronized boolean tryDeleteDocument(IndexReader readerIn, int docID) throws IOException {
1348 
1349     final LeafReader reader;
1350     if (readerIn instanceof LeafReader) {
1351       // Reader is already atomic: use the incoming docID:
1352       reader = (LeafReader) readerIn;
1353     } else {
1354       // Composite reader: lookup sub-reader and re-base docID:
1355       List<LeafReaderContext> leaves = readerIn.leaves();
1356       int subIndex = ReaderUtil.subIndex(docID, leaves);
1357       reader = leaves.get(subIndex).reader();
1358       docID -= leaves.get(subIndex).docBase;
1359       assert docID >= 0;
1360       assert docID < reader.maxDoc();
1361     }
1362 
1363     if (!(reader instanceof SegmentReader)) {
1364       throw new IllegalArgumentException("the reader must be a SegmentReader or composite reader containing only SegmentReaders");
1365     }
1366       
1367     final SegmentCommitInfo info = ((SegmentReader) reader).getSegmentInfo();
1368 
1369     // TODO: this is a slow linear search, but, number of
1370     // segments should be contained unless something is
1371     // seriously wrong w/ the index, so it should be a minor
1372     // cost:
1373 
1374     if (segmentInfos.indexOf(info) != -1) {
1375       ReadersAndUpdates rld = readerPool.get(info, false);
1376       if (rld != null) {
1377         synchronized(bufferedUpdatesStream) {
1378           rld.initWritableLiveDocs();
1379           if (rld.delete(docID)) {
1380             final int fullDelCount = rld.info.getDelCount() + rld.getPendingDeleteCount();
1381             if (fullDelCount == rld.info.info.maxDoc()) {
1382               // If a merge has already registered for this
1383               // segment, we leave it in the readerPool; the
1384               // merge will skip merging it and will then drop
1385               // it once it's done:
1386               if (!mergingSegments.contains(rld.info)) {
1387                 segmentInfos.remove(rld.info);
1388                 readerPool.drop(rld.info);
1389                 checkpoint();
1390               }
1391             }
1392 
1393             // Must bump changeCount so if no other changes
1394             // happened, we still commit this change:
1395             changed();
1396           }
1397           //System.out.println("  yes " + info.info.name + " " + docID);
1398           return true;
1399         }
1400       } else {
1401         //System.out.println("  no rld " + info.info.name + " " + docID);
1402       }
1403     } else {
1404       //System.out.println("  no seg " + info.info.name + " " + docID);
1405     }
1406     return false;
1407   }
1408 
1409   /**
1410    * Deletes the document(s) containing any of the
1411    * terms. All given deletes are applied and flushed atomically
1412    * at the same time.
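        *
        * <p>For example (a sketch; assumes <code>writer</code> is an open
        * IndexWriter and the "id" field is illustrative):
        *
        * <pre class="prettyprint">
        * writer.deleteDocuments(new Term("id", "1"), new Term("id", "2"));
        * </pre>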
1413    *
1414    * @param terms array of terms to identify the documents
1415    * to be deleted
1416    * @throws CorruptIndexException if the index is corrupt
1417    * @throws IOException if there is a low-level IO error
1418    */
1419   public void deleteDocuments(Term... terms) throws IOException {
1420     ensureOpen();
1421     try {
1422       if (docWriter.deleteTerms(terms)) {
1423         processEvents(true, false);
1424       }
1425     } catch (VirtualMachineError tragedy) {
1426       tragicEvent(tragedy, "deleteDocuments(Term..)");
1427     }
1428   }
1429 
1430   /**
1431    * Deletes the document(s) matching any of the provided queries.
1432    * All given deletes are applied and flushed atomically at the same time.
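        *
        * <p>For example (a sketch; assumes <code>writer</code> is an open
        * IndexWriter; the field and value are illustrative):
        *
        * <pre class="prettyprint">
        * writer.deleteDocuments(new TermQuery(new Term("category", "archived")));
        * </pre>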
1433    *
1434    * @param queries array of queries to identify the documents
1435    * to be deleted
1436    * @throws CorruptIndexException if the index is corrupt
1437    * @throws IOException if there is a low-level IO error
1438    */
1439   public void deleteDocuments(Query... queries) throws IOException {
1440     ensureOpen();
1441 
1442     // LUCENE-6379: Specialize MatchAllDocsQuery
1443     for(Query query : queries) {
1444       if (query.getClass() == MatchAllDocsQuery.class) {
1445         deleteAll();
1446         return;
1447       }
1448     }
1449 
1450     try {
1451       if (docWriter.deleteQueries(queries)) {
1452         processEvents(true, false);
1453       }
1454     } catch (VirtualMachineError tragedy) {
1455       tragicEvent(tragedy, "deleteDocuments(Query..)");
1456     }
1457   }
1458 
1459   /**
1460    * Updates a document by first deleting the document(s)
1461    * containing <code>term</code> and then adding the new
1462    * document.  The delete and then add are atomic as seen
1463    * by a reader on the same index (flush may happen only after
1464    * the add).
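        *
        * <p>A minimal sketch (assumes an open <code>IndexWriter</code> named
        * <code>writer</code>; the fields are illustrative):
        *
        * <pre class="prettyprint">
        * Document doc = new Document();
        * doc.add(new StringField("id", "42", Field.Store.YES));
        * doc.add(new TextField("body", "the updated text", Field.Store.NO));
        * writer.updateDocument(new Term("id", "42"), doc);
        * </pre>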
1465    *
1466    * @param term the term to identify the document(s) to be
1467    * deleted
1468    * @param doc the document to be added
1469    * @throws CorruptIndexException if the index is corrupt
1470    * @throws IOException if there is a low-level IO error
1471    */
1472   public void updateDocument(Term term, Iterable<? extends IndexableField> doc) throws IOException {
1473     ensureOpen();
1474     try {
1475       boolean success = false;
1476       try {
1477         if (docWriter.updateDocument(doc, analyzer, term)) {
1478           processEvents(true, false);
1479         }
1480         success = true;
1481       } finally {
1482         if (!success) {
1483           if (infoStream.isEnabled("IW")) {
1484             infoStream.message("IW", "hit exception updating document");
1485           }
1486         }
1487       }
1488     } catch (AbortingException | VirtualMachineError tragedy) {
1489       tragicEvent(tragedy, "updateDocument");
1490     }
1491   }
1492 
1493   /**
1494    * Updates a document's {@link NumericDocValues} for <code>field</code> to the
1495    * given <code>value</code>. You can only update fields that already exist in
1496    * the index, not add new fields through this method.
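        * 
        * <p>For example (a sketch; assumes "price" was previously indexed as a
        * <code>NumericDocValuesField</code> and <code>writer</code> is open):
        * 
        * <pre class="prettyprint">
        * writer.updateNumericDocValue(new Term("id", "42"), "price", 990L);
        * </pre>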
1497    * 
1498    * @param term
1499    *          the term to identify the document(s) to be updated
1500    * @param field
1501    *          field name of the {@link NumericDocValues} field
1502    * @param value
1503    *          new value for the field
1504    * @throws CorruptIndexException
1505    *           if the index is corrupt
1506    * @throws IOException
1507    *           if there is a low-level IO error
1508    */
1509   public void updateNumericDocValue(Term term, String field, long value) throws IOException {
1510     ensureOpen();
1511     if (!globalFieldNumberMap.contains(field, DocValuesType.NUMERIC)) {
1512       throw new IllegalArgumentException("can only update existing numeric-docvalues fields!");
1513     }
1514     try {
1515       if (docWriter.updateDocValues(new NumericDocValuesUpdate(term, field, value))) {
1516         processEvents(true, false);
1517       }
1518     } catch (VirtualMachineError tragedy) {
1519       tragicEvent(tragedy, "updateNumericDocValue");
1520     }
1521   }
1522 
1523   /**
1524    * Updates a document's {@link BinaryDocValues} for <code>field</code> to the
1525    * given <code>value</code>. You can only update fields that already exist in
1526    * the index, not add new fields through this method.
1527    * 
1528    * <p>
1529    * <b>NOTE:</b> this method currently replaces the existing value of all
1530    * affected documents with the new value.
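        * 
        * <p>For example (a sketch; assumes "payload" was previously indexed as a
        * <code>BinaryDocValuesField</code> and <code>writer</code> is open):
        * 
        * <pre class="prettyprint">
        * writer.updateBinaryDocValue(new Term("id", "42"), "payload", new BytesRef("new-value"));
        * </pre>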
1531    * 
1532    * @param term
1533    *          the term to identify the document(s) to be updated
1534    * @param field
1535    *          field name of the {@link BinaryDocValues} field
1536    * @param value
1537    *          new value for the field
1538    * @throws CorruptIndexException
1539    *           if the index is corrupt
1540    * @throws IOException
1541    *           if there is a low-level IO error
1542    */
1543   public void updateBinaryDocValue(Term term, String field, BytesRef value) throws IOException {
1544     ensureOpen();
1545     if (value == null) {
1546       throw new IllegalArgumentException("cannot update a field to a null value: " + field);
1547     }
1548     if (!globalFieldNumberMap.contains(field, DocValuesType.BINARY)) {
1549       throw new IllegalArgumentException("can only update existing binary-docvalues fields!");
1550     }
1551     try {
1552       if (docWriter.updateDocValues(new BinaryDocValuesUpdate(term, field, value))) {
1553         processEvents(true, false);
1554       }
1555     } catch (VirtualMachineError tragedy) {
1556       tragicEvent(tragedy, "updateBinaryDocValue");
1557     }
1558   }
1559   
1560   /**
1561    * Updates documents' DocValues fields to the given values. Each field update
1562    * is applied to the set of documents that are associated with the
1563    * {@link Term}, setting the field to the given value. All updates are
1564    * atomically applied and flushed together.
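        * 
        * <p>For example (a sketch; assumes both fields already exist in the index
        * as doc values fields and <code>writer</code> is open; names and values
        * are illustrative):
        * 
        * <pre class="prettyprint">
        * writer.updateDocValues(new Term("id", "42"),
        *     new NumericDocValuesField("price", 990L),
        *     new BinaryDocValuesField("payload", new BytesRef("new-value")));
        * </pre>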
1565    * 
1566    * @param updates
1567    *          the updates to apply
1568    * @throws CorruptIndexException
1569    *           if the index is corrupt
1570    * @throws IOException
1571    *           if there is a low-level IO error
1572    */
1573   public void updateDocValues(Term term, Field... updates) throws IOException {
1574     ensureOpen();
1575     DocValuesUpdate[] dvUpdates = new DocValuesUpdate[updates.length];
1576     for (int i = 0; i < updates.length; i++) {
1577       final Field f = updates[i];
1578       final DocValuesType dvType = f.fieldType().docValuesType();
1579       if (dvType == null) {
1580         throw new NullPointerException("DocValuesType cannot be null (field: \"" + f.name() + "\")");
1581       }
1582       if (dvType == DocValuesType.NONE) {
1583         throw new IllegalArgumentException("can only update NUMERIC or BINARY fields! field=" + f.name());
1584       }
1585       if (!globalFieldNumberMap.contains(f.name(), dvType)) {
1586         throw new IllegalArgumentException("can only update existing docvalues fields! field=" + f.name() + ", type=" + dvType);
1587       }
1588       switch (dvType) {
1589         case NUMERIC:
1590           dvUpdates[i] = new NumericDocValuesUpdate(term, f.name(), (Long) f.numericValue());
1591           break;
1592         case BINARY:
1593           dvUpdates[i] = new BinaryDocValuesUpdate(term, f.name(), f.binaryValue());
1594           break;
1595         default:
1596           throw new IllegalArgumentException("can only update NUMERIC or BINARY fields: field=" + f.name() + ", type=" + dvType);
1597       }
1598     }
1599     try {
1600       if (docWriter.updateDocValues(dvUpdates)) {
1601         processEvents(true, false);
1602       }
1603     } catch (VirtualMachineError tragedy) {
1604       tragicEvent(tragedy, "updateDocValues");
1605     }
1606   }
1607   
1608   // for test purpose
1609   final synchronized int getSegmentCount(){
1610     return segmentInfos.size();
1611   }
1612 
1613   // for test purpose
1614   final synchronized int getNumBufferedDocuments(){
1615     return docWriter.getNumDocs();
1616   }
1617 
1618   // for test purpose
1619   final synchronized Collection<String> getIndexFileNames() throws IOException {
1620     return segmentInfos.files(true);
1621   }
1622 
1623   // for test purpose
1624   final synchronized int maxDoc(int i) {
1625     if (i >= 0 && i < segmentInfos.size()) {
1626       return segmentInfos.info(i).info.maxDoc();
1627     } else {
1628       return -1;
1629     }
1630   }
1631 
1632   // for test purpose
1633   final int getFlushCount() {
1634     return flushCount.get();
1635   }
1636 
1637   // for test purpose
1638   final int getFlushDeletesCount() {
1639     return flushDeletesCount.get();
1640   }
1641 
1642   final String newSegmentName() {
1643     // Cannot synchronize on IndexWriter because that causes
1644     // deadlock
1645     synchronized(segmentInfos) {
1646       // Important to increment changeCount so that the
1647       // segmentInfos is written on close.  Otherwise we
1648       // could close, re-open and re-return the same segment
1649       // name that was previously returned which can cause
1650       // problems at least with ConcurrentMergeScheduler.
1651       changeCount.incrementAndGet();
1652       segmentInfos.changed();
1653       return "_" + Integer.toString(segmentInfos.counter++, Character.MAX_RADIX);
1654     }
1655   }
1656 
1657   /** If enabled, information about merges will be printed to this.
1658    */
1659   final InfoStream infoStream;
1660 
1661   /**
1662    * Forces merge policy to merge segments until there are
1663    * {@code <= maxNumSegments}.  The actual merges to be
1664    * executed are determined by the {@link MergePolicy}.
1665    *
1666    * <p>This is a horribly costly operation, especially when
1667    * you pass a small {@code maxNumSegments}; usually you
1668    * should only call this if the index is static (will no
1669    * longer be changed).</p>
1670    *
1671    * <p>Note that this requires free space that is proportional
1672    * to the size of the index in your Directory: 2X if you are
1673    * not using compound file format, and 3X if you are.
1674    * For example, if your index size is 10 MB then you need
1675    * an additional 20 MB free for this to complete (30 MB if
1676    * you're using compound file format). This is also affected
1677    * by the {@link Codec} that is used to execute the merge,
1678    * and may result in an even bigger index. Also, it's best
1679    * to call {@link #commit()} afterwards, to allow IndexWriter
1680    * to free up disk space.</p>
1681    *
1682    * <p>If some but not all readers re-open while merging
1683    * is underway, this will cause {@code > 2X} temporary
1684    * space to be consumed as those new readers will then
1685    * hold open the temporary segments at that time.  It is
1686    * best not to re-open readers while merging is running.</p>
1687    *
1688    * <p>The actual temporary usage could be much less than
1689    * these figures (it depends on many factors).</p>
1690    *
1691    * <p>In general, once this completes, the total size of the
1692    * index will be less than the size of the starting index.
1693    * It could be quite a bit smaller (if there were many
1694    * pending deletes) or just slightly smaller.</p>
1695    *
1696    * <p>If an Exception is hit, for example
1697    * due to disk full, the index will not be corrupted and no
1698    * documents will be lost.  However, it may have
1699    * been partially merged (some segments were merged but
1700    * not all), and it's possible that one of the segments in
1701    * the index will be in non-compound format even when
1702    * using compound file format.  This will occur when the
1703    * Exception is hit during conversion of the segment into
1704    * compound format.</p>
1705    *
1706    * <p>This call will merge those segments present in
1707    * the index when the call started.  If other threads are
1708    * still adding documents and flushing segments, those
1709    * newly created segments will not be merged unless you
1710    * call forceMerge again.</p>
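        *
        * <p>A typical usage sketch (assumes an open <code>IndexWriter</code> named
        * <code>writer</code> on an index that will no longer change):
        *
        * <pre class="prettyprint">
        * writer.forceMerge(1);   // merge down to a single segment
        * writer.commit();        // commit so old segment files can be freed
        * </pre>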
1711    *
1712    * @param maxNumSegments maximum number of segments left
1713    * in the index after merging finishes
1714    * 
1715    * @throws CorruptIndexException if the index is corrupt
1716    * @throws IOException if there is a low-level IO error
1717    * @see MergePolicy#findMerges
1718    *
1719   */
1720   public void forceMerge(int maxNumSegments) throws IOException {
1721     forceMerge(maxNumSegments, true);
1722   }
1723 
1724   /** Just like {@link #forceMerge(int)}, except you can
1725    *  specify whether the call should block until
1726    *  all merging completes.  This is only meaningful with a
1727    *  {@link MergeScheduler} that is able to run merges in
1728    *  background threads.
1729    */
1730   public void forceMerge(int maxNumSegments, boolean doWait) throws IOException {
1731     ensureOpen();
1732 
1733     if (maxNumSegments < 1)
1734       throw new IllegalArgumentException("maxNumSegments must be >= 1; got " + maxNumSegments);
1735 
1736     if (infoStream.isEnabled("IW")) {
1737       infoStream.message("IW", "forceMerge: index now " + segString());
1738       infoStream.message("IW", "now flush at forceMerge");
1739     }
1740 
1741     flush(true, true);
1742 
1743     synchronized(this) {
1744       resetMergeExceptions();
1745       segmentsToMerge.clear();
1746       for(SegmentCommitInfo info : segmentInfos) {
1747         segmentsToMerge.put(info, Boolean.TRUE);
1748       }
1749       mergeMaxNumSegments = maxNumSegments;
1750 
1751       // Now mark all pending & running merges for forced
1752       // merge:
1753       for(final MergePolicy.OneMerge merge  : pendingMerges) {
1754         merge.maxNumSegments = maxNumSegments;
1755         segmentsToMerge.put(merge.info, Boolean.TRUE);
1756       }
1757 
1758       for (final MergePolicy.OneMerge merge: runningMerges) {
1759         merge.maxNumSegments = maxNumSegments;
1760         segmentsToMerge.put(merge.info, Boolean.TRUE);
1761       }
1762     }
1763 
1764     maybeMerge(config.getMergePolicy(), MergeTrigger.EXPLICIT, maxNumSegments);
1765 
1766     if (doWait) {
1767       synchronized(this) {
1768         while(true) {
1769 
1770           if (tragedy != null) {
1771             throw new IllegalStateException("this writer hit an unrecoverable error; cannot complete forceMerge", tragedy);
1772           }
1773 
1774           if (mergeExceptions.size() > 0) {
1775             // Forward any exceptions in background merge
1776             // threads to the current thread:
1777             final int size = mergeExceptions.size();
1778             for(int i=0;i<size;i++) {
1779               final MergePolicy.OneMerge merge = mergeExceptions.get(i);
1780               if (merge.maxNumSegments != -1) {
1781                 throw new IOException("background merge hit exception: " + merge.segString(), merge.getException());
1782               }
1783             }
1784           }
1785 
1786           if (maxNumSegmentsMergesPending())
1787             doWait();
1788           else
1789             break;
1790         }
1791       }
1792 
1793       // If close is called while we are still
1794       // running, throw an exception so the calling
1795       // thread will know merging did not
1796       // complete
1797       ensureOpen();
1798     }
1799     // NOTE: in the ConcurrentMergeScheduler case, when
1800     // doWait is false, we can return immediately while
1801     // background threads accomplish the merging
1802   }
1803 
1804   /** Returns true if any merges in pendingMerges or
1805    *  runningMerges are maxNumSegments merges. */
1806   private synchronized boolean maxNumSegmentsMergesPending() {
1807     for (final MergePolicy.OneMerge merge : pendingMerges) {
1808       if (merge.maxNumSegments != -1)
1809         return true;
1810     }
1811 
1812     for (final MergePolicy.OneMerge merge : runningMerges) {
1813       if (merge.maxNumSegments != -1)
1814         return true;
1815     }
1816 
1817     return false;
1818   }
1819 
1820   /** Just like {@link #forceMergeDeletes()}, except you can
1821    *  specify whether the call should block until the
1822    *  operation completes.  This is only meaningful with a
1823    *  {@link MergeScheduler} that is able to run merges in
1824    *  background threads. */
1825   public void forceMergeDeletes(boolean doWait)
1826     throws IOException {
1827     ensureOpen();
1828 
1829     flush(true, true);
1830 
1831     if (infoStream.isEnabled("IW")) {
1832       infoStream.message("IW", "forceMergeDeletes: index now " + segString());
1833     }
1834 
1835     final MergePolicy mergePolicy = config.getMergePolicy();
1836     MergePolicy.MergeSpecification spec;
1837     boolean newMergesFound = false;
1838     synchronized(this) {
1839       spec = mergePolicy.findForcedDeletesMerges(segmentInfos, this);
1840       newMergesFound = spec != null;
1841       if (newMergesFound) {
1842         final int numMerges = spec.merges.size();
1843         for(int i=0;i<numMerges;i++)
1844           registerMerge(spec.merges.get(i));
1845       }
1846     }
1847 
1848     mergeScheduler.merge(this, MergeTrigger.EXPLICIT, newMergesFound);
1849 
1850     if (spec != null && doWait) {
1851       final int numMerges = spec.merges.size();
1852       synchronized(this) {
1853         boolean running = true;
1854         while(running) {
1855 
1856           if (tragedy != null) {
1857             throw new IllegalStateException("this writer hit an unrecoverable error; cannot complete forceMergeDeletes", tragedy);
1858           }
1859 
1860           // Check each merge that MergePolicy asked us to
1861           // do, to see if any of them are still running and
1862           // if any of them have hit an exception.
1863           running = false;
1864           for(int i=0;i<numMerges;i++) {
1865             final MergePolicy.OneMerge merge = spec.merges.get(i);
1866             if (pendingMerges.contains(merge) || runningMerges.contains(merge)) {
1867               running = true;
1868             }
1869             Throwable t = merge.getException();
1870             if (t != null) {
1871               throw new IOException("background merge hit exception: " + merge.segString(), t);
1872             }
1873           }
1874 
1875           // If any of our merges are still running, wait:
1876           if (running)
1877             doWait();
1878         }
1879       }
1880     }
1881 
1882     // NOTE: in the ConcurrentMergeScheduler case, when
1883     // doWait is false, we can return immediately while
1884     // background threads accomplish the merging
1885   }
1886 
1887 
1888   /**
1889    *  Forces merging of all segments that have deleted
1890    *  documents.  The actual merges to be executed are
1891    *  determined by the {@link MergePolicy}.  For example,
1892    *  the default {@link TieredMergePolicy} will only
1893    *  pick a segment if the percentage of
1894    *  deleted docs is over 10%.
1895    *
1896    *  <p>This is often a horribly costly operation; rarely
1897    *  is it warranted.</p>
1898    *
1899    *  <p>To see how
1900    *  many deletions you have pending in your index, call
1901    *  {@link IndexReader#numDeletedDocs}.</p>
1902    *
1903    *  <p><b>NOTE</b>: this method first flushes a new
1904    *  segment (if there are indexed documents), and applies
1905    *  all buffered deletes.
1906    */
1907   public void forceMergeDeletes() throws IOException {
1908     forceMergeDeletes(true);
1909   }
1910 
1911   /**
1912    * Expert: asks the mergePolicy whether any merges are
1913    * necessary now and if so, runs the requested merges and
1914    * then iterates (testing again whether merges are needed) until no
1915    * more merges are returned by the mergePolicy.
1916    *
1917    * Explicit calls to maybeMerge() are usually not
1918    * necessary. The most common case is when merge policy
1919    * parameters have changed.
1920    * 
1921    * This method will call the {@link MergePolicy} with
1922    * {@link MergeTrigger#EXPLICIT}.
1923    */
1924   public final void maybeMerge() throws IOException {
1925     maybeMerge(config.getMergePolicy(), MergeTrigger.EXPLICIT, UNBOUNDED_MAX_MERGE_SEGMENTS);
1926   }
1927 
1928   private final void maybeMerge(MergePolicy mergePolicy, MergeTrigger trigger, int maxNumSegments) throws IOException {
1929     ensureOpen(false);
1930     boolean newMergesFound = updatePendingMerges(mergePolicy, trigger, maxNumSegments);
1931     mergeScheduler.merge(this, trigger, newMergesFound);
1932   }
1933 
1934   private synchronized boolean updatePendingMerges(MergePolicy mergePolicy, MergeTrigger trigger, int maxNumSegments)
1935     throws IOException {
1936 
1937     // In case infoStream was disabled on init, but then enabled at some
1938     // point, try again to log the config here:
1939     messageState();
1940 
1941     assert maxNumSegments == -1 || maxNumSegments > 0;
1942     assert trigger != null;
1943     if (stopMerges) {
1944       return false;
1945     }
1946 
1947     // Do not start new merges if disaster struck
1948     if (tragedy != null) {
1949       return false;
1950     }
1951     boolean newMergesFound = false;
1952     final MergePolicy.MergeSpecification spec;
1953     if (maxNumSegments != UNBOUNDED_MAX_MERGE_SEGMENTS) {
1954       assert trigger == MergeTrigger.EXPLICIT || trigger == MergeTrigger.MERGE_FINISHED :
1955         "Expected EXPLICIT or MERGE_FINISHED as trigger even with maxNumSegments set but was: " + trigger.name();
1956       spec = mergePolicy.findForcedMerges(segmentInfos, maxNumSegments, Collections.unmodifiableMap(segmentsToMerge), this);
1957       newMergesFound = spec != null;
1958       if (newMergesFound) {
1959         final int numMerges = spec.merges.size();
1960         for(int i=0;i<numMerges;i++) {
1961           final MergePolicy.OneMerge merge = spec.merges.get(i);
1962           merge.maxNumSegments = maxNumSegments;
1963         }
1964       }
1965     } else {
1966       spec = mergePolicy.findMerges(trigger, segmentInfos, this);
1967     }
1968     newMergesFound = spec != null;
1969     if (newMergesFound) {
1970       final int numMerges = spec.merges.size();
1971       for(int i=0;i<numMerges;i++) {
1972         registerMerge(spec.merges.get(i));
1973       }
1974     }
1975     return newMergesFound;
1976   }
1977 
1978   /** Expert: to be used by a {@link MergePolicy} to avoid
1979    *  selecting merges for segments already being merged.
1980    *  The returned collection is not cloned, and thus is
1981    *  only safe to access if you hold IndexWriter's lock
1982    *  (which you do when IndexWriter invokes the
1983    *  MergePolicy).
1984    *
1985    *  <p>Do not alter the returned collection! */
1986   public synchronized Collection<SegmentCommitInfo> getMergingSegments() {
1987     return mergingSegments;
1988   }
1989 
1990   /**
1991    * Expert: the {@link MergeScheduler} calls this method to retrieve the next
1992    * merge requested by the MergePolicy
1993    * 
1994    * @lucene.experimental
1995    */
1996   public synchronized MergePolicy.OneMerge getNextMerge() {
1997     if (pendingMerges.size() == 0) {
1998       return null;
1999     } else {
2000       // Advance the merge from pending to running
2001       MergePolicy.OneMerge merge = pendingMerges.removeFirst();
2002       runningMerges.add(merge);
2003       return merge;
2004     }
2005   }
2006 
2007   /**
2008    * Expert: returns true if there are merges waiting to be scheduled.
2009    * 
2010    * @lucene.experimental
2011    */
2012   public synchronized boolean hasPendingMerges() {
2013     return pendingMerges.size() != 0;
2014   }
2015 
2016   /**
2017    * Close the <code>IndexWriter</code> without committing
2018    * any changes that have occurred since the last commit
2019    * (or since it was opened, if commit hasn't been called).
2020    * This removes any temporary files that had been created,
2021    * after which the state of the index will be the same as
2022    * it was when commit() was last called or when this
2023    * writer was first opened.  This also clears a previous
2024    * call to {@link #prepareCommit}.
2025    * @throws IOException if there is a low-level IO error
2026    */
2027   @Override
2028   public void rollback() throws IOException {
2029     // don't call ensureOpen here: this acts like "close()" in closeable.
2030     
2031     // Ensure that only one thread actually gets to do the
2032     // closing, and make sure no commit is also in progress:
2033     if (shouldClose(true)) {
2034       rollbackInternal();
2035     }
2036   }
2037 
2038   private void rollbackInternal() throws IOException {
2039     // Make sure no commit is running, else e.g. we can close while another thread is still fsync'ing:
2040     synchronized(commitLock) {
2041       rollbackInternalNoCommit();
2042     }
2043   }
2044 
2045   private void rollbackInternalNoCommit() throws IOException {
2046     boolean success = false;
2047 
2048     if (infoStream.isEnabled("IW")) {
2049       infoStream.message("IW", "rollback");
2050     }
2051     
2052     try {
2053       abortMerges();
2054 
2055       rateLimiters.close();
2056 
2057       if (infoStream.isEnabled("IW")) {
2058         infoStream.message("IW", "rollback: done finish merges");
2059       }
2060 
2061       // Must pre-close in case it increments changeCount so that we can then
2062       // set it to false before calling rollbackInternal
2063       mergeScheduler.close();
2064 
2065       bufferedUpdatesStream.clear();
2066       docWriter.close(); // mark it as closed first to prevent subsequent indexing actions/flushes 
2067       docWriter.abort(this); // don't sync on IW here
2068       synchronized(this) {
2069 
2070         if (pendingCommit != null) {
2071           pendingCommit.rollbackCommit(directory);
2072           try {
2073             deleter.decRef(pendingCommit);
2074           } finally {
2075             pendingCommit = null;
2076             notifyAll();
2077           }
2078         }
2079 
2080         // Don't bother saving any changes in our segmentInfos
2081         readerPool.dropAll(false);
2082 
2083         // Keep the same segmentInfos instance but replace all
2084         // of its SegmentInfo instances so IFD below will remove
2085         // any segments we flushed since the last commit:
2086         segmentInfos.rollbackSegmentInfos(rollbackSegments);
2087 
2088         if (infoStream.isEnabled("IW") ) {
2089           infoStream.message("IW", "rollback: infos=" + segString(segmentInfos));
2090         }
2091 
2092         testPoint("rollback before checkpoint");
2093 
2094         // Ask deleter to locate unreferenced files & remove
2095         // them ... only when we are not experiencing a tragedy, else
2096         // these methods throw ACE:
2097         if (tragedy == null) {
2098           deleter.checkpoint(segmentInfos, false);
2099           deleter.refresh();
2100           deleter.close();
2101         }
2102 
2103         lastCommitChangeCount = changeCount.get();
2104 
2105         // Must set closed while inside same sync block where we call deleter.refresh, else concurrent threads may try to sneak a flush in,
2106         // after we leave this sync block and before we enter the sync block in the finally clause below that sets closed:
2107         closed = true;
2108 
2109         IOUtils.close(writeLock);                     // release write lock
2110         writeLock = null;
2111       }
2112 
2113       success = true;
2114     } catch (VirtualMachineError tragedy) {
2115       tragicEvent(tragedy, "rollbackInternal");
2116     } finally {
2117       if (success == false) {
2118         // Must not hold IW's lock while closing
2119         // mergeScheduler: this can lead to deadlock,
2120         // e.g. TestIW.testThreadInterruptDeadlock
2121         IOUtils.closeWhileHandlingException(mergeScheduler);
2122       }
2123       synchronized(this) {
2124         if (success == false) {
2125           // we tried to be nice about it: do the minimum
2126           
2127           // don't leak a segments_N file if there is a pending commit
2128           if (pendingCommit != null) {
2129             try {
2130               pendingCommit.rollbackCommit(directory);
2131               deleter.decRef(pendingCommit);
2132             } catch (Throwable t) {
2133             }
2134             pendingCommit = null;
2135           }
2136           
2137           // close all the closeables we can (but important is readerPool and writeLock to prevent leaks)
2138           IOUtils.closeWhileHandlingException(readerPool, deleter, writeLock);
2139           writeLock = null;
2140         }
2141         closed = true;
2142         closing = false;
2143 
2144         // So any "concurrently closing" threads wake up and see that the close has now completed:
2145         notifyAll();
2146       }
2147     }
2148   }
2149 
2150   /**
2151    * Delete all documents in the index.
2152    * 
2153    * <p>
2154    * This method will drop all buffered documents and will remove all segments
2155    * from the index. This change will not be visible until a {@link #commit()}
2156    * has been called. This method can be rolled back using {@link #rollback()}.
2157    * </p>
2158    * 
2159    * <p>
2160    * NOTE: this method is much faster than using deleteDocuments( new
2161    * MatchAllDocsQuery() ). Yet, this method also has different semantics
2162    * compared to {@link #deleteDocuments(Query...)}: internal
2163    * data-structures are cleared, all segment information is
2164    * forcefully dropped, and viral field semantics (such as omitted norms or
2165    * cleared doc value types) are reset. Essentially, a call to {@link #deleteAll()} is
2166    * equivalent to creating a new {@link IndexWriter} with
2167    * {@link OpenMode#CREATE}, whereas a delete query only marks documents as
2168    * deleted.
2169    * </p>
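        * 
        * <p>A short sketch (assumes <code>writer</code> is an open IndexWriter):
        * 
        * <pre class="prettyprint">
        * writer.deleteAll();   // drops buffered docs and all segments, not yet visible
        * writer.commit();      // makes the now-empty index visible to new readers
        * // or writer.rollback() to undo the deleteAll (this also closes the writer)
        * </pre>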
2170    * 
2171    * <p>
2172    * NOTE: this method will forcefully abort all merges in progress. If other
2173    * threads are running {@link #forceMerge}, {@link #addIndexes(CodecReader[])}
2174    * or {@link #forceMergeDeletes} methods, they may receive
2175    * {@link MergePolicy.MergeAbortedException}s.
2176    */
2177   public void deleteAll() throws IOException {
2178     ensureOpen();
2179     // Remove any buffered docs
2180     boolean success = false;
2181     /* Hold the full flush lock to prevent concurrent commits / NRT reopens from
2182      * getting in our way and doing unnecessary work -- if we don't lock this here
2183      * we might get in trouble. */
2184     /*
2185      * We first abort and trash everything we have in-memory,
2186      * keeping the thread-states locked; the lockAndAbortAll operation
2187      * also guarantees "point in time" semantics, i.e. the checkpoint that we need in terms
2188      * of the logical happens-before relationship in the DW. So we
2189      * abort all in-memory structures.
2190      * We also drop the global field numbering during the abort to make
2191      * sure it's just like a fresh index.
2192      */
2193     try {
2194       synchronized (fullFlushLock) { 
2195         long abortedDocCount = docWriter.lockAndAbortAll(this);
2196         pendingNumDocs.addAndGet(-abortedDocCount);
2197         
2198         processEvents(false, true);
2199         synchronized (this) {
2200           try {
2201             // Abort any running merges
2202             abortMerges();
2203             // Let merges run again
2204             stopMerges = false;
2205             // Remove all segments
2206             pendingNumDocs.addAndGet(-segmentInfos.totalMaxDoc());
2207             segmentInfos.clear();
2208             // Ask deleter to locate unreferenced files & remove them:
2209             deleter.checkpoint(segmentInfos, false);
2210             /* don't refresh the deleter here since there might
2211              * be concurrent indexing requests coming in opening
2212              * files on the directory after we called DW#abort()
2213              * if we do so these indexing requests might hit FNF exceptions.
2214              * We will remove the files incrementally as we go...
2215              */
2216             // Don't bother saving any changes in our segmentInfos
2217             readerPool.dropAll(false);
2218             // Mark that the index has changed
2219             changeCount.incrementAndGet();
2220             segmentInfos.changed();
2221             globalFieldNumberMap.clear();
2222 
2223             success = true;
2224           } finally {
2225             docWriter.unlockAllAfterAbortAll(this);
2226             if (!success) {
2227               if (infoStream.isEnabled("IW")) {
2228                 infoStream.message("IW", "hit exception during deleteAll");
2229               }
2230             }
2231           }
2232         }
2233       }
2234     } catch (VirtualMachineError tragedy) {
2235       tragicEvent(tragedy, "deleteAll");
2236     }
2237   }
2238 
2239   /** Aborts running merges.  Be careful when using this
2240    *  method: when you abort a long-running merge, you lose
2241    *  a lot of work that must later be redone. */
2242   private synchronized void abortMerges() {
2243 
2244     stopMerges = true;
2245 
2246     // Abort all pending & running merges:
2247     for (final MergePolicy.OneMerge merge : pendingMerges) {
2248       if (infoStream.isEnabled("IW")) {
2249         infoStream.message("IW", "now abort pending merge " + segString(merge.segments));
2250       }
2251       merge.rateLimiter.setAbort();
2252       mergeFinish(merge);
2253     }
2254     pendingMerges.clear();
2255 
2256     for (final MergePolicy.OneMerge merge : runningMerges) {
2257       if (infoStream.isEnabled("IW")) {
2258         infoStream.message("IW", "now abort running merge " + segString(merge.segments));
2259       }
2260       merge.rateLimiter.setAbort();
2261     }
2262 
2263     // We wait here to make all merges stop.  It should not
2264     // take very long because they periodically check if
2265     // they are aborted.
2266     while (runningMerges.size() != 0) {
2267 
2268       if (infoStream.isEnabled("IW")) {
2269         infoStream.message("IW", "now wait for " + runningMerges.size() + " running merge/s to abort");
2270       }
2271 
2272       doWait();
2273     }
2274 
2275     notifyAll();
2276     assert 0 == mergingSegments.size();
2277 
2278     if (infoStream.isEnabled("IW")) {
2279       infoStream.message("IW", "all running merges have aborted");
2280     }
2281   }
2282 
2283   /**
2284    * Wait for any currently outstanding merges to finish.
2285    *
2286    * <p>It is guaranteed that any merges started prior to calling this method
2287    *    will have completed once this method completes.</p>
2288    */
2289   void waitForMerges() throws IOException {
2290 
2291     // Give merge scheduler last chance to run, in case
2292     // any pending merges are waiting. We can't hold IW's lock
2293     // when going into merge because it can lead to deadlock.
2294     mergeScheduler.merge(this, MergeTrigger.CLOSING, false);
2295 
2296     synchronized (this) {
2297       ensureOpen(false);
2298       if (infoStream.isEnabled("IW")) {
2299         infoStream.message("IW", "waitForMerges");
2300       }
2301 
2302       while (pendingMerges.size() > 0 || runningMerges.size() > 0) {
2303         doWait();
2304       }
2305 
2306       // sanity check
2307       assert 0 == mergingSegments.size();
2308 
2309       if (infoStream.isEnabled("IW")) {
2310         infoStream.message("IW", "waitForMerges done");
2311       }
2312     }
2313   }
2314 
2315   /**
2316    * Called whenever the SegmentInfos has been updated and
2317    * the index files referenced exist (correctly) in the
2318    * index directory.
2319    */
2320   synchronized void checkpoint() throws IOException {
2321     changed();
2322     deleter.checkpoint(segmentInfos, false);
2323   }
2324 
2325   /** Checkpoints with IndexFileDeleter, so it's aware of
2326    *  new files, and increments changeCount, so on
2327    *  close/commit we will write a new segments file, but
2328    *  does NOT bump segmentInfos.version. */
2329   synchronized void checkpointNoSIS() throws IOException {
2330     changeCount.incrementAndGet();
2331     deleter.checkpoint(segmentInfos, false);
2332   }
2333 
2334   /** Called internally if any index state has changed. */
2335   synchronized void changed() {
2336     changeCount.incrementAndGet();
2337     segmentInfos.changed();
2338   }
2339 
2340   synchronized void publishFrozenUpdates(FrozenBufferedUpdates packet) {
2341     assert packet != null && packet.any();
2342     synchronized (bufferedUpdatesStream) {
2343       bufferedUpdatesStream.push(packet);
2344     }
2345   }
2346   
2347   /**
2348    * Atomically adds the segment private delete packet and publishes the flushed
2349    * segments SegmentInfo to the index writer.
2350    */
2351   void publishFlushedSegment(SegmentCommitInfo newSegment,
2352       FrozenBufferedUpdates packet, FrozenBufferedUpdates globalPacket) throws IOException {
2353     try {
2354       synchronized (this) {
2355         // Lock order IW -> BDS
2356         ensureOpen(false);
2357         synchronized (bufferedUpdatesStream) {
2358           if (infoStream.isEnabled("IW")) {
2359             infoStream.message("IW", "publishFlushedSegment");
2360           }
2361           
2362           if (globalPacket != null && globalPacket.any()) {
2363             bufferedUpdatesStream.push(globalPacket);
2364           } 
2365           // Publishing the segment must be synched on IW -> BDS to make sure
2366           // that no merge prunes away the seg. private delete packet
2367           final long nextGen;
2368           if (packet != null && packet.any()) {
2369             nextGen = bufferedUpdatesStream.push(packet);
2370           } else {
2371             // Since we don't have a delete packet to apply we can get a new
2372             // generation right away
2373             nextGen = bufferedUpdatesStream.getNextGen();
2374           }
2375           if (infoStream.isEnabled("IW")) {
2376             infoStream.message("IW", "publish sets newSegment delGen=" + nextGen + " seg=" + segString(newSegment));
2377           }
2378           newSegment.setBufferedDeletesGen(nextGen);
2379           segmentInfos.add(newSegment);
2380           checkpoint();
2381         }
2382       }
2383     } finally {
2384       flushCount.incrementAndGet();
2385       doAfterFlush();
2386     }
2387   }
2388 
2389   private synchronized void resetMergeExceptions() {
2390     mergeExceptions = new ArrayList<>();
2391     mergeGen++;
2392   }
2393 
2394   private void noDupDirs(Directory... dirs) {
2395     HashSet<Directory> dups = new HashSet<>();
2396     for(int i=0;i<dirs.length;i++) {
2397       if (dups.contains(dirs[i]))
2398         throw new IllegalArgumentException("Directory " + dirs[i] + " appears more than once");
2399       if (dirs[i] == directoryOrig)
2400         throw new IllegalArgumentException("Cannot add directory to itself");
2401       dups.add(dirs[i]);
2402     }
2403   }
2404 
2405   /** Acquires write locks on all the directories; be sure
2406    *  to match with a call to {@link IOUtils#close} in a
2407    *  finally clause. */
2408   private List<Lock> acquireWriteLocks(Directory... dirs) throws IOException {
2409     List<Lock> locks = new ArrayList<>(dirs.length);
2410     for(int i=0;i<dirs.length;i++) {
2411       boolean success = false;
2412       try {
2413         Lock lock = dirs[i].obtainLock(WRITE_LOCK_NAME);
2414         locks.add(lock);
2415         success = true;
2416       } finally {
2417         if (success == false) {
2418           // Release all previously acquired locks:
2419           // TODO: addSuppressed? it could be many...
2420           IOUtils.closeWhileHandlingException(locks);
2421         }
2422       }
2423     }
2424     return locks;
2425   }
2426 
2427   /**
2428    * Adds all segments from an array of indexes into this index.
2429    *
2430    * <p>This may be used to parallelize batch indexing. A large document
2431    * collection can be broken into sub-collections. Each sub-collection can be
2432    * indexed in parallel, on a different thread, process or machine. The
2433    * complete index can then be created by merging sub-collection indexes
2434    * with this method.
2435    *
2436    * <p>
2437    * <b>NOTE:</b> this method acquires the write lock in
2438    * each directory, to ensure that no {@code IndexWriter}
2439    * is currently open or tries to open while this is
2440    * running.
2441    *
2442    * <p>This method is transactional in how Exceptions are
2443    * handled: it does not commit a new segments_N file until
2444    * all indexes are added.  This means if an Exception
2445    * occurs (for example disk full), then either no indexes
2446    * will have been added or they all will have been.
2447    *
2448    * <p>Note that this requires temporary free space in the
2449    * {@link Directory} up to 2X the sum of all input indexes
2450    * (including the starting index). If readers/searchers
2451    * are open against the starting index, then temporary
2452    * free space required will be higher by the size of the
2453    * starting index (see {@link #forceMerge(int)} for details).
2454    *
2455    * <p>This requires this index not be among those to be added.
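        *
        * <p>A hedged sketch (the paths and directory setup are illustrative;
        * assumes <code>writer</code> is an open IndexWriter on the target index):
        *
        * <pre class="prettyprint">
        * Directory part1 = FSDirectory.open(Paths.get("/path/to/part1"));
        * Directory part2 = FSDirectory.open(Paths.get("/path/to/part2"));
        * writer.addIndexes(part1, part2);
        * writer.commit();
        * </pre>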
2456    *
2457    * @throws CorruptIndexException if the index is corrupt
2458    * @throws IOException if there is a low-level IO error
2459    * @throws IllegalArgumentException if addIndexes would cause
2460    *   the index to exceed {@link #MAX_DOCS}
2461    */
2462   public void addIndexes(Directory... dirs) throws IOException {
2463     ensureOpen();
2464 
2465     noDupDirs(dirs);
2466 
2467     List<Lock> locks = acquireWriteLocks(dirs);
2468 
2469     boolean successTop = false;
2470 
2471     try {
2472       if (infoStream.isEnabled("IW")) {
2473         infoStream.message("IW", "flush at addIndexes(Directory...)");
2474       }
2475 
2476       flush(false, true);
2477 
2478       List<SegmentCommitInfo> infos = new ArrayList<>();
2479 
2480       // long so we can detect int overflow:
2481       long totalMaxDoc = 0;
2482       List<SegmentInfos> commits = new ArrayList<>(dirs.length);
2483       for (Directory dir : dirs) {
2484         if (infoStream.isEnabled("IW")) {
2485           infoStream.message("IW", "addIndexes: process directory " + dir);
2486         }
2487         SegmentInfos sis = SegmentInfos.readLatestCommit(dir); // read infos from dir
2488         totalMaxDoc += sis.totalMaxDoc();
2489         commits.add(sis);
2490       }
2491 
2492       // Best-effort up front check:
2493       testReserveDocs(totalMaxDoc);
2494         
2495       boolean success = false;
2496       try {
2497         for (SegmentInfos sis : commits) {
2498           for (SegmentCommitInfo info : sis) {
2499             assert !infos.contains(info): "dup info dir=" + info.info.dir + " name=" + info.info.name;
2500 
2501             String newSegName = newSegmentName();
2502 
2503             if (infoStream.isEnabled("IW")) {
2504               infoStream.message("IW", "addIndexes: process segment origName=" + info.info.name + " newName=" + newSegName + " info=" + info);
2505             }
2506 
2507             IOContext context = new IOContext(new FlushInfo(info.info.maxDoc(), info.sizeInBytes()));
2508 
2509             FieldInfos fis = readFieldInfos(info);
2510             for(FieldInfo fi : fis) {
2511               globalFieldNumberMap.addOrGet(fi.name, fi.number, fi.getDocValuesType());
2512             }
2513             infos.add(copySegmentAsIs(info, newSegName, context));
2514           }
2515         }
2516         success = true;
2517       } finally {
2518         if (!success) {
2519           for(SegmentCommitInfo sipc : infos) {
2520             // Safe: these files must exist
2521             deleteNewFiles(sipc.files());
2522           }
2523         }
2524       }
2525 
2526       synchronized (this) {
2527         success = false;
2528         try {
2529           ensureOpen();
2530 
2531           // Now reserve the docs, just before we update SIS:
2532           reserveDocs(totalMaxDoc);
2533 
2534           success = true;
2535         } finally {
2536           if (!success) {
2537             for(SegmentCommitInfo sipc : infos) {
2538               // Safe: these files must exist
2539               deleteNewFiles(sipc.files());
2540             }
2541           }
2542         }
2543         segmentInfos.addAll(infos);
2544         checkpoint();
2545       }
2546 
2547       successTop = true;
2548 
2549     } catch (VirtualMachineError tragedy) {
2550       tragicEvent(tragedy, "addIndexes(Directory...)");
2551     } finally {
2552       if (successTop) {
2553         IOUtils.close(locks);
2554       } else {
2555         IOUtils.closeWhileHandlingException(locks);
2556       }
2557     }
2558     maybeMerge();
2559   }
2560   
2561   /**
2562    * Merges the provided indexes into this index.
2563    * 
2564    * <p>
2565    * The provided IndexReaders are not closed.
2566    * 
2567    * <p>
2568    * See {@link #addIndexes} for details on transactional semantics, temporary
2569    * free space required in the Directory, and non-CFS segments on an Exception.
2570    * 
2571    * <p>
2572    * <b>NOTE:</b> empty segments are dropped by this method and not added to this
2573    * index.
2574    * 
2575    * <p>
2576    * <b>NOTE:</b> this method merges all given {@link LeafReader}s in one
2577    * merge. If you intend to merge a large number of readers, it may be better
2578    * to call this method multiple times, each time with a small set of readers.
2579    * In principle, if you use a merge policy with a {@code mergeFactor} or
2580    * {@code maxMergeAtOnce} parameter, you should pass that many readers in one
2581    * call.
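        * 
        * <p>A hedged sketch of wrapping another index's leaves (assumes
        * <code>otherDir</code> holds the other index and <code>writer</code> is
        * open; the wrapping via SlowCodecReaderWrapper is just one way to obtain
        * CodecReaders):
        * 
        * <pre class="prettyprint">
        * try (DirectoryReader reader = DirectoryReader.open(otherDir)) {
        *   List&lt;LeafReaderContext&gt; leaves = reader.leaves();
        *   CodecReader[] codecReaders = new CodecReader[leaves.size()];
        *   for (int i = 0; i &lt; leaves.size(); i++) {
        *     codecReaders[i] = SlowCodecReaderWrapper.wrap(leaves.get(i).reader());
        *   }
        *   writer.addIndexes(codecReaders);
        * }
        * </pre>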
2582    * 
2583    * @throws CorruptIndexException
2584    *           if the index is corrupt
2585    * @throws IOException
2586    *           if there is a low-level IO error
2587    * @throws IllegalArgumentException
2588    *           if addIndexes would cause the index to exceed {@link #MAX_DOCS}
2589    */
2590   public void addIndexes(CodecReader... readers) throws IOException {
2591     ensureOpen();
2592 
2593     // long so we can detect int overflow:
2594     long numDocs = 0;
2595 
2596     try {
2597       if (infoStream.isEnabled("IW")) {
2598         infoStream.message("IW", "flush at addIndexes(CodecReader...)");
2599       }
2600       flush(false, true);
2601 
2602       String mergedName = newSegmentName();
2603       for (CodecReader leaf : readers) {
2604         numDocs += leaf.numDocs();
2605       }
2606       
2607       // Best-effort up front check:
2608       testReserveDocs(numDocs);
2609 
2610       final IOContext context = new IOContext(new MergeInfo((int) numDocs, -1, false, -1));
2611 
2612       // TODO: somehow we should fix this merge so it's
2613       // abortable so that IW.close(false) is able to stop it
2614       TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(directory);
2615 
2616       SegmentInfo info = new SegmentInfo(directoryOrig, Version.LATEST, mergedName, -1,
2617                                          false, codec, Collections.<String,String>emptyMap(), StringHelper.randomId(), new HashMap<String,String>());
2618 
2619       SegmentMerger merger = new SegmentMerger(Arrays.asList(readers), info, infoStream, trackingDir,
2620                                                globalFieldNumberMap, 
2621                                                context);
2622       
2623       rateLimiters.set(new MergeRateLimiter(null));
2624 
2625       if (!merger.shouldMerge()) {
2626         return;
2627       }
2628 
2629       merger.merge();                // merge 'em
2630 
2631       SegmentCommitInfo infoPerCommit = new SegmentCommitInfo(info, 0, -1L, -1L, -1L);
2632 
2633       info.setFiles(new HashSet<>(trackingDir.getCreatedFiles()));
2634       trackingDir.getCreatedFiles().clear();
2635                                          
2636       setDiagnostics(info, SOURCE_ADDINDEXES_READERS);
2637 
2638       final MergePolicy mergePolicy = config.getMergePolicy();
2639       boolean useCompoundFile;
2640       synchronized(this) { // Guard segmentInfos
2641         if (stopMerges) {
2642           // Safe: these files must exist
2643           deleteNewFiles(infoPerCommit.files());
2644           return;
2645         }
2646         ensureOpen();
2647         useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, infoPerCommit, this);
2648       }
2649 
2650       // Now create the compound file if needed
2651       if (useCompoundFile) {
2652         Collection<String> filesToDelete = infoPerCommit.files();
2653         TrackingDirectoryWrapper trackingCFSDir = new TrackingDirectoryWrapper(mergeDirectory);
2654         // TODO: unlike merge, on exception we aren't sniping any trash cfs files here?
2655         // createCompoundFile tries to cleanup, but it might not always be able to...
2656         try {
2657           createCompoundFile(infoStream, trackingCFSDir, info, context);
2658         } finally {
2659           // delete new non cfs files directly: they were never
2660           // registered with IFD
2661           deleteNewFiles(filesToDelete);
2662         }
2663         info.setUseCompoundFile(true);
2664       }
2665 
2666       // Have codec write SegmentInfo.  Must do this after
2667       // creating CFS so that 1) .si isn't slurped into CFS,
2668       // and 2) .si reflects useCompoundFile=true change
2669       // above:
2670       codec.segmentInfoFormat().write(trackingDir, info, context);
2671 
2672       info.addFiles(trackingDir.getCreatedFiles());
2673 
2674       // Register the new segment
2675       synchronized(this) {
2676         if (stopMerges) {
2677           // Safe: these files must exist
2678           deleteNewFiles(infoPerCommit.files());
2679           return;
2680         }
2681         ensureOpen();
2682 
2683         // Now reserve the docs, just before we update SIS:
2684         reserveDocs(numDocs);
2685       
2686         segmentInfos.add(infoPerCommit);
2687         checkpoint();
2688       }
2689     } catch (VirtualMachineError tragedy) {
2690       tragicEvent(tragedy, "addIndexes(CodecReader...)");
2691     }
2692     maybeMerge();
2693   }
2694 
2695   /** Copies the segment files as-is into the IndexWriter's directory. */
2696   private SegmentCommitInfo copySegmentAsIs(SegmentCommitInfo info, String segName, IOContext context) throws IOException {
2697     
2698     //System.out.println("copy seg=" + info.info.name + " version=" + info.info.getVersion());
2699     // Same SI as before but we change directory and name
2700     SegmentInfo newInfo = new SegmentInfo(directoryOrig, info.info.getVersion(), segName, info.info.maxDoc(),
2701                                           info.info.getUseCompoundFile(), info.info.getCodec(), 
2702                                           info.info.getDiagnostics(), info.info.getId(), info.info.getAttributes());
2703     SegmentCommitInfo newInfoPerCommit = new SegmentCommitInfo(newInfo, info.getDelCount(), info.getDelGen(), 
2704                                                                info.getFieldInfosGen(), info.getDocValuesGen());
2705     
2706     newInfo.setFiles(info.files());
2707 
2708     boolean success = false;
2709 
2710     Set<String> copiedFiles = new HashSet<>();
2711     try {
2712       // Copy the segment's files
2713       for (String file: info.files()) {
2714         final String newFileName = newInfo.namedForThisSegment(file);
2715 
2716         assert !slowFileExists(directory, newFileName): "file \"" + newFileName + "\" already exists; newInfo.files=" + newInfo.files();
2717 
2718         directory.copyFrom(info.info.dir, file, newFileName, context);
2719         copiedFiles.add(newFileName);
2720       }
2721       success = true;
2722     } finally {
2723       if (!success) {
2724         // Safe: these files must exist
2725         deleteNewFiles(copiedFiles);
2726       }
2727     }
2728 
2729     assert copiedFiles.equals(newInfoPerCommit.files());
2730     
2731     return newInfoPerCommit;
2732   }
2733   
2734   /**
2735    * A hook for extending classes to execute operations after pending added and
2736    * deleted documents have been flushed to the Directory but before the change
2737    * is committed (new segments_N file written).
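   * <p>A minimal sketch of a subclass using this hook (the class name and the
   * printed message below are illustrative only, not part of this API):</p>
   * <pre class="prettyprint">
   * class MyIndexWriter extends IndexWriter {
   *   MyIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
   *     super(dir, conf);
   *   }
   *   protected void doAfterFlush() throws IOException {
   *     System.out.println("segments flushed, not yet committed");
   *   }
   * }
   * </pre>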
2738    */
2739   protected void doAfterFlush() throws IOException {}
2740 
2741   /**
2742    * A hook for extending classes to execute operations before pending added and
2743    * deleted documents are flushed to the Directory.
2744    */
2745   protected void doBeforeFlush() throws IOException {}
2746 
2747   /** <p>Expert: prepare for commit.  This does the
2748    *  first phase of 2-phase commit. This method does all
2749    *  steps necessary to commit changes since this writer
2750    *  was opened: flushes pending added and deleted docs,
   *  syncs the index files, writes most of the next segments_N
2752    *  file.  After calling this you must call either {@link
2753    *  #commit()} to finish the commit, or {@link
2754    *  #rollback()} to revert the commit and undo all changes
2755    *  done since the writer was opened.</p>
2756    *
2757    * <p>You can also just call {@link #commit()} directly
   *  without calling prepareCommit first, in which case that method
2759    *  will internally call prepareCommit.
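   *
   * <p>A minimal two-phase-commit sketch (assuming {@code writer} is an open
   *  IndexWriter; the "other resources" are illustrative):</p>
   * <pre class="prettyprint">
   * writer.prepareCommit();
   * try {
   *   // first phase succeeded; commit other transactional resources here,
   *   // then finish the index commit:
   *   writer.commit();
   * } catch (Throwable t) {
   *   writer.rollback();
   * }
   * </pre>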
2760    */
2761   @Override
2762   public final void prepareCommit() throws IOException {
2763     ensureOpen();
2764     prepareCommitInternal(config.getMergePolicy());
2765   }
2766 
2767   private void prepareCommitInternal(MergePolicy mergePolicy) throws IOException {
2768     startCommitTime = System.nanoTime();
2769     synchronized(commitLock) {
2770       ensureOpen(false);
2771       if (infoStream.isEnabled("IW")) {
2772         infoStream.message("IW", "prepareCommit: flush");
2773         infoStream.message("IW", "  index before flush " + segString());
2774       }
2775 
2776       if (tragedy != null) {
2777         throw new IllegalStateException("this writer hit an unrecoverable error; cannot commit", tragedy);
2778       }
2779 
2780       if (pendingCommit != null) {
2781         throw new IllegalStateException("prepareCommit was already called with no corresponding call to commit");
2782       }
2783 
2784       doBeforeFlush();
2785       testPoint("startDoFlush");
2786       SegmentInfos toCommit = null;
2787       boolean anySegmentsFlushed = false;
2788 
2789       // This is copied from doFlush, except it's modified to
2790       // clone & incRef the flushed SegmentInfos inside the
2791       // sync block:
2792 
2793       try {
2794 
2795         synchronized (fullFlushLock) {
2796           boolean flushSuccess = false;
2797           boolean success = false;
2798           try {
2799             anySegmentsFlushed = docWriter.flushAllThreads();
2800             if (!anySegmentsFlushed) {
2801               // prevent double increment since docWriter#doFlush increments the flushcount
2802               // if we flushed anything.
2803               flushCount.incrementAndGet();
2804             }
2805             processEvents(false, true);
2806             flushSuccess = true;
2807 
2808             synchronized(this) {
2809               maybeApplyDeletes(true);
2810 
2811               readerPool.commit(segmentInfos);
2812 
2813               if (changeCount.get() != lastCommitChangeCount) {
2814                 // There are changes to commit, so we will write a new segments_N in startCommit.
2815                 // The act of committing is itself an NRT-visible change (an NRT reader that was
2816                 // just opened before this should see it on reopen) so we increment changeCount
2817                 // and segments version so a future NRT reopen will see the change:
2818                 changeCount.incrementAndGet();
2819                 segmentInfos.changed();
2820               }
2821 
2822               // Must clone the segmentInfos while we still
2823               // hold fullFlushLock and while sync'd so that
2824               // no partial changes (eg a delete w/o
2825               // corresponding add from an updateDocument) can
2826               // sneak into the commit point:
2827               toCommit = segmentInfos.clone();
2828 
2829               pendingCommitChangeCount = changeCount.get();
2830 
2831               // This protects the segmentInfos we are now going
2832               // to commit.  This is important in case, eg, while
2833               // we are trying to sync all referenced files, a
2834               // merge completes which would otherwise have
2835               // removed the files we are now syncing.    
2836               filesToCommit = toCommit.files(false);
2837               deleter.incRef(filesToCommit);
2838             }
2839             success = true;
2840           } finally {
2841             if (!success) {
2842               if (infoStream.isEnabled("IW")) {
2843                 infoStream.message("IW", "hit exception during prepareCommit");
2844               }
2845             }
2846             // Done: finish the full flush!
2847             docWriter.finishFullFlush(this, flushSuccess);
2848             doAfterFlush();
2849           }
2850         }
2851       } catch (AbortingException | VirtualMachineError tragedy) {
2852         tragicEvent(tragedy, "prepareCommit");
2853       }
2854      
2855       boolean success = false;
2856       try {
2857         if (anySegmentsFlushed) {
2858           maybeMerge(mergePolicy, MergeTrigger.FULL_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
2859         }
2860         startCommit(toCommit);
2861         success = true;
2862       } finally {
2863         if (!success) {
2864           synchronized (this) {
2865             if (filesToCommit != null) {
2866               deleter.decRefWhileHandlingException(filesToCommit);
2867               filesToCommit = null;
2868             }
2869           }
2870         }
2871       }
2872     }
2873   }
2874   
2875   /**
2876    * Sets the commit user data map. That method is considered a transaction by
2877    * {@link IndexWriter} and will be {@link #commit() committed} even if no other
2878    * changes were made to the writer instance. Note that you must call this method
2879    * before {@link #prepareCommit()}, or otherwise it won't be included in the
2880    * follow-on {@link #commit()}.
2881    * <p>
2882    * <b>NOTE:</b> the map is cloned internally, therefore altering the map's
2883    * contents after calling this method has no effect.
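   * <p>A minimal sketch (assuming {@code writer} is an open IndexWriter):</p>
   * <pre class="prettyprint">
   * Map&lt;String,String&gt; userData = new HashMap&lt;&gt;();
   * userData.put("lastUpdate", Long.toString(System.currentTimeMillis()));
   * writer.setCommitData(userData); // must be called before prepareCommit()/commit()
   * writer.commit();
   * </pre>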
2884    */
2885   public final synchronized void setCommitData(Map<String,String> commitUserData) {
2886     segmentInfos.setUserData(new HashMap<>(commitUserData));
2887     changeCount.incrementAndGet();
2888   }
2889   
2890   /**
2891    * Returns the commit user data map that was last committed, or the one that
2892    * was set on {@link #setCommitData(Map)}.
2893    */
2894   public final synchronized Map<String,String> getCommitData() {
2895     return segmentInfos.getUserData();
2896   }
2897   
2898   // Used only by commit and prepareCommit, below; lock
2899   // order is commitLock -> IW
2900   private final Object commitLock = new Object();
2901 
2902   /**
2903    * <p>Commits all pending changes (added and deleted
2904    * documents, segment merges, added
2905    * indexes, etc.) to the index, and syncs all referenced
2906    * index files, such that a reader will see the changes
2907    * and the index updates will survive an OS or machine
2908    * crash or power loss.  Note that this does not wait for
2909    * any running background merges to finish.  This may be a
2910    * costly operation, so you should test the cost in your
2911    * application and do it only when really necessary.</p>
2912    *
2913    * <p> Note that this operation calls Directory.sync on
2914    * the index files.  That call should not return until the
2915    * file contents and metadata are on stable storage.  For
2916    * FSDirectory, this calls the OS's fsync.  But, beware:
2917    * some hardware devices may in fact cache writes even
2918    * during fsync, and return before the bits are actually
2919    * on stable storage, to give the appearance of faster
2920    * performance.  If you have such a device, and it does
2921    * not have a battery backup (for example) then on power
2922    * loss it may still lose data.  Lucene cannot guarantee
2923    * consistency on such devices.  </p>
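   *
   * <p>A minimal sketch (assuming {@code writer} and {@code doc} already
   * exist):</p>
   * <pre class="prettyprint">
   * writer.addDocument(doc);
   * writer.commit(); // now durable on stable storage and visible to newly opened readers
   * </pre>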
2924    *
2925    * @see #prepareCommit
2926    */
2927   @Override
2928   public final void commit() throws IOException {
2929     ensureOpen();
2930     commitInternal(config.getMergePolicy());
2931   }
2932 
2933   /** Returns true if there may be changes that have not been
2934    *  committed.  There are cases where this may return true
2935    *  when there are no actual "real" changes to the index,
2936    *  for example if you've deleted by Term or Query but
2937    *  that Term or Query does not match any documents.
2938    *  Also, if a merge kicked off as a result of flushing a
2939    *  new segment during {@link #commit}, or a concurrent
   *  merge finished, this method may return true right
2941    *  after you had just called {@link #commit}. */
2942   public final boolean hasUncommittedChanges() {
2943     return changeCount.get() != lastCommitChangeCount || docWriter.anyChanges() || bufferedUpdatesStream.any();
2944   }
2945 
2946   private final void commitInternal(MergePolicy mergePolicy) throws IOException {
2947 
2948     if (infoStream.isEnabled("IW")) {
2949       infoStream.message("IW", "commit: start");
2950     }
2951 
2952     synchronized(commitLock) {
2953       ensureOpen(false);
2954 
2955       if (infoStream.isEnabled("IW")) {
2956         infoStream.message("IW", "commit: enter lock");
2957       }
2958 
2959       if (pendingCommit == null) {
2960         if (infoStream.isEnabled("IW")) {
2961           infoStream.message("IW", "commit: now prepare");
2962         }
2963         prepareCommitInternal(mergePolicy);
2964       } else {
2965         if (infoStream.isEnabled("IW")) {
2966           infoStream.message("IW", "commit: already prepared");
2967         }
2968       }
2969 
2970       finishCommit();
2971     }
2972   }
2973 
2974   private final void finishCommit() throws IOException {
2975 
2976     boolean commitCompleted = false;
2977     boolean finished = false;
2978     String committedSegmentsFileName = null;
2979 
2980     try {
2981       synchronized(this) {
2982         ensureOpen(false);
2983 
2984         if (tragedy != null) {
2985           throw new IllegalStateException("this writer hit an unrecoverable error; cannot complete commit", tragedy);
2986         }
2987 
2988         if (pendingCommit != null) {
2989           try {
2990 
2991             if (infoStream.isEnabled("IW")) {
2992               infoStream.message("IW", "commit: pendingCommit != null");
2993             }
2994 
2995             committedSegmentsFileName = pendingCommit.finishCommit(directory);
2996 
2997             // we committed, if anything goes wrong after this, we are screwed and it's a tragedy:
2998             commitCompleted = true;
2999 
3000             if (infoStream.isEnabled("IW")) {
3001               infoStream.message("IW", "commit: done writing segments file \"" + committedSegmentsFileName + "\"");
3002             }
3003 
3004             // NOTE: don't use this.checkpoint() here, because
3005             // we do not want to increment changeCount:
3006             deleter.checkpoint(pendingCommit, true);
3007 
3008             // Carry over generation to our master SegmentInfos:
3009             segmentInfos.updateGeneration(pendingCommit);
3010 
3011             lastCommitChangeCount = pendingCommitChangeCount;
3012             rollbackSegments = pendingCommit.createBackupSegmentInfos();
3013 
3014             finished = true;
3015           } finally {
3016             notifyAll();
3017             try {
3018               if (finished) {
3019                 // all is good
3020                 deleter.decRef(filesToCommit);
3021               } else if (commitCompleted == false) {
3022                 // exc happened in finishCommit: not a tragedy
3023                 deleter.decRefWhileHandlingException(filesToCommit);
3024               }
3025             } finally {
3026               pendingCommit = null;
3027               filesToCommit = null;
3028             }
3029           }
3030         } else {
3031           assert filesToCommit == null;
3032           if (infoStream.isEnabled("IW")) {
3033             infoStream.message("IW", "commit: pendingCommit == null; skip");
3034           }
3035         }
3036       }
3037     } catch (Throwable t) {
3038       if (infoStream.isEnabled("IW")) {
3039         infoStream.message("IW", "hit exception during finishCommit: " + t.getMessage());
3040       }
3041       if (commitCompleted) {
3042         tragicEvent(t, "finishCommit");
3043       } else {
3044         IOUtils.reThrow(t);
3045       }
3046     }
3047 
3048     if (infoStream.isEnabled("IW")) {
3049       infoStream.message("IW", String.format(Locale.ROOT, "commit: took %.1f msec", (System.nanoTime()-startCommitTime)/1000000.0));
3050       infoStream.message("IW", "commit: done");
3051     }
3052   }
3053 
3054   // Ensures only one flush() is actually flushing segments
3055   // at a time:
3056   private final Object fullFlushLock = new Object();
3057   
3058   // for assert
3059   boolean holdsFullFlushLock() {
3060     return Thread.holdsLock(fullFlushLock);
3061   }
3062 
3063   /** Moves all in-memory segments to the {@link Directory}, but does not commit
3064    *  (fsync) them (call {@link #commit} for that). */
3065   public final void flush() throws IOException {
3066     flush(true, true);
3067   }
3068 
3069   /**
3070    * Flush all in-memory buffered updates (adds and deletes)
3071    * to the Directory.
3072    * @param triggerMerge if true, we may merge segments (if
3073    *  deletes or docs were flushed) if necessary
   * @param applyAllDeletes whether pending deletes should also be applied
3075    */
3076   final void flush(boolean triggerMerge, boolean applyAllDeletes) throws IOException {
3077 
3078     // NOTE: this method cannot be sync'd because
3079     // maybeMerge() in turn calls mergeScheduler.merge which
3080     // in turn can take a long time to run and we don't want
3081     // to hold the lock for that.  In the case of
3082     // ConcurrentMergeScheduler this can lead to deadlock
3083     // when it stalls due to too many running merges.
3084 
3085     // We can be called during close, when closing==true, so we must pass false to ensureOpen:
3086     ensureOpen(false);
3087     if (doFlush(applyAllDeletes) && triggerMerge) {
3088       maybeMerge(config.getMergePolicy(), MergeTrigger.FULL_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
3089     }
3090   }
3091 
  /** Returns true if a segment was flushed or deletes were applied. */
3093   private boolean doFlush(boolean applyAllDeletes) throws IOException {
3094     if (tragedy != null) {
3095       throw new IllegalStateException("this writer hit an unrecoverable error; cannot flush", tragedy);
3096     }
3097 
3098     doBeforeFlush();
3099     testPoint("startDoFlush");
3100     boolean success = false;
3101     try {
3102 
3103       if (infoStream.isEnabled("IW")) {
3104         infoStream.message("IW", "  start flush: applyAllDeletes=" + applyAllDeletes);
3105         infoStream.message("IW", "  index before flush " + segString());
3106       }
3107       boolean anyChanges = false;
3108       
3109       synchronized (fullFlushLock) {
3110         boolean flushSuccess = false;
3111         try {
3112           anyChanges = docWriter.flushAllThreads();
3113           if (!anyChanges) {
            // flushAllThreads increments flushCount only if something was
            // actually flushed, so count this no-op flush here:
3115             flushCount.incrementAndGet();
3116           }
3117           flushSuccess = true;
3118         } finally {
3119           docWriter.finishFullFlush(this, flushSuccess);
3120           processEvents(false, true);
3121         }
3122       }
3123       synchronized(this) {
3124         anyChanges |= maybeApplyDeletes(applyAllDeletes);
3125         doAfterFlush();
3126         success = true;
3127         return anyChanges;
3128       }
3129     } catch (AbortingException | VirtualMachineError tragedy) {
3130       tragicEvent(tragedy, "doFlush");
3131       // never hit
3132       return false;
3133     } finally {
3134       if (!success) {
3135         if (infoStream.isEnabled("IW")) {
3136           infoStream.message("IW", "hit exception during flush");
3137         }
3138       }
3139     }
3140   }
3141   
3142   final synchronized boolean maybeApplyDeletes(boolean applyAllDeletes) throws IOException {
3143     if (applyAllDeletes) {
3144       if (infoStream.isEnabled("IW")) {
3145         infoStream.message("IW", "apply all deletes during flush");
3146       }
3147       return applyAllDeletesAndUpdates();
3148     } else if (infoStream.isEnabled("IW")) {
3149       infoStream.message("IW", "don't apply deletes now delTermCount=" + bufferedUpdatesStream.numTerms() + " bytesUsed=" + bufferedUpdatesStream.ramBytesUsed());
3150     }
3151 
3152     return false;
3153   }
3154   
3155   final synchronized boolean applyAllDeletesAndUpdates() throws IOException {
3156     flushDeletesCount.incrementAndGet();
3157     final BufferedUpdatesStream.ApplyDeletesResult result;
3158     if (infoStream.isEnabled("IW")) {
3159       infoStream.message("IW", "now apply all deletes for all segments maxDoc=" + (docWriter.getNumDocs() + segmentInfos.totalMaxDoc()));
3160     }
3161     result = bufferedUpdatesStream.applyDeletesAndUpdates(readerPool, segmentInfos.asList());
3162     if (result.anyDeletes) {
3163       checkpoint();
3164     }
3165     if (!keepFullyDeletedSegments && result.allDeleted != null) {
3166       if (infoStream.isEnabled("IW")) {
3167         infoStream.message("IW", "drop 100% deleted segments: " + segString(result.allDeleted));
3168       }
3169       for (SegmentCommitInfo info : result.allDeleted) {
3170         // If a merge has already registered for this
3171         // segment, we leave it in the readerPool; the
3172         // merge will skip merging it and will then drop
3173         // it once it's done:
3174         if (!mergingSegments.contains(info)) {
3175           segmentInfos.remove(info);
3176           pendingNumDocs.addAndGet(-info.info.maxDoc());
3177           readerPool.drop(info);
3178         }
3179       }
3180       checkpoint();
3181     }
3182     bufferedUpdatesStream.prune(segmentInfos);
3183     return result.anyDeletes;
3184   }
3185 
3186   // for testing only
3187   DocumentsWriter getDocsWriter() {
3188     return docWriter;
3189   }
3190 
3191   /** Expert:  Return the number of documents currently
3192    *  buffered in RAM. */
3193   public final synchronized int numRamDocs() {
3194     ensureOpen();
3195     return docWriter.getNumDocs();
3196   }
3197 
3198   private synchronized void ensureValidMerge(MergePolicy.OneMerge merge) {
3199     for(SegmentCommitInfo info : merge.segments) {
3200       if (!segmentInfos.contains(info)) {
3201         throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.info.name + ") that is not in the current index " + segString(), directoryOrig);
3202       }
3203     }
3204   }
3205 
3206   private void skipDeletedDoc(DocValuesFieldUpdates.Iterator[] updatesIters, int deletedDoc) {
3207     for (DocValuesFieldUpdates.Iterator iter : updatesIters) {
3208       if (iter.doc() == deletedDoc) {
3209         iter.nextDoc();
3210       }
      // When entering this method, all iterators must already be beyond the
      // deleted document, or right on it, in which case we just advanced them
      // past it above, so they must be beyond it now.
3214       assert iter.doc() > deletedDoc : "updateDoc=" + iter.doc() + " deletedDoc=" + deletedDoc;
3215     }
3216   }
3217   
3218   private static class MergedDeletesAndUpdates {
3219     ReadersAndUpdates mergedDeletesAndUpdates = null;
3220     MergePolicy.DocMap docMap = null;
3221     boolean initializedWritableLiveDocs = false;
3222     
3223     MergedDeletesAndUpdates() {}
3224     
3225     final void init(ReaderPool readerPool, MergePolicy.OneMerge merge, MergeState mergeState, boolean initWritableLiveDocs) throws IOException {
3226       if (mergedDeletesAndUpdates == null) {
3227         mergedDeletesAndUpdates = readerPool.get(merge.info, true);
3228         docMap = merge.getDocMap(mergeState);
3229         assert docMap.isConsistent(merge.info.info.maxDoc());
3230       }
3231       if (initWritableLiveDocs && !initializedWritableLiveDocs) {
3232         mergedDeletesAndUpdates.initWritableLiveDocs();
3233         this.initializedWritableLiveDocs = true;
3234       }
3235     }
3236     
3237   }
3238   
3239   private void maybeApplyMergedDVUpdates(MergePolicy.OneMerge merge, MergeState mergeState, int docUpto, 
3240       MergedDeletesAndUpdates holder, String[] mergingFields, DocValuesFieldUpdates[] dvFieldUpdates,
3241       DocValuesFieldUpdates.Iterator[] updatesIters, int curDoc) throws IOException {
3242     int newDoc = -1;
3243     for (int idx = 0; idx < mergingFields.length; idx++) {
3244       DocValuesFieldUpdates.Iterator updatesIter = updatesIters[idx];
3245       if (updatesIter.doc() == curDoc) { // document has an update
3246         if (holder.mergedDeletesAndUpdates == null) {
3247           holder.init(readerPool, merge, mergeState, false);
3248         }
3249         if (newDoc == -1) { // map once per all field updates, but only if there are any updates
3250           newDoc = holder.docMap.map(docUpto);
3251         }
3252         DocValuesFieldUpdates dvUpdates = dvFieldUpdates[idx];
3253         dvUpdates.add(newDoc, updatesIter.value());
3254         updatesIter.nextDoc(); // advance to next document
3255       } else {
3256         assert updatesIter.doc() > curDoc : "field=" + mergingFields[idx] + " updateDoc=" + updatesIter.doc() + " curDoc=" + curDoc;
3257       }
3258     }
3259   }
3260 
3261   /**
3262    * Carefully merges deletes and updates for the segments we just merged. This
3263    * is tricky because, although merging will clear all deletes (compacts the
3264    * documents) and compact all the updates, new deletes and updates may have
3265    * been flushed to the segments since the merge was started. This method
3266    * "carries over" such new deletes and updates onto the newly merged segment,
3267    * and saves the resulting deletes and updates files (incrementing the delete
3268    * and DV generations for merge.info). If no deletes were flushed, no new
3269    * deletes file is saved.
3270    */
3271   synchronized private ReadersAndUpdates commitMergedDeletesAndUpdates(MergePolicy.OneMerge merge, MergeState mergeState) throws IOException {
3272 
3273     testPoint("startCommitMergeDeletes");
3274 
3275     final List<SegmentCommitInfo> sourceSegments = merge.segments;
3276 
3277     if (infoStream.isEnabled("IW")) {
3278       infoStream.message("IW", "commitMergeDeletes " + segString(merge.segments));
3279     }
3280 
3281     // Carefully merge deletes that occurred after we
3282     // started merging:
3283     int docUpto = 0;
3284     long minGen = Long.MAX_VALUE;
3285 
3286     // Lazy init (only when we find a delete to carry over):
3287     final MergedDeletesAndUpdates holder = new MergedDeletesAndUpdates();
3288     final DocValuesFieldUpdates.Container mergedDVUpdates = new DocValuesFieldUpdates.Container();
3289     
3290     for (int i = 0; i < sourceSegments.size(); i++) {
3291       SegmentCommitInfo info = sourceSegments.get(i);
3292       minGen = Math.min(info.getBufferedDeletesGen(), minGen);
3293       final int maxDoc = info.info.maxDoc();
3294       final Bits prevLiveDocs = merge.readers.get(i).getLiveDocs();
3295       final ReadersAndUpdates rld = readerPool.get(info, false);
3296       // We hold a ref so it should still be in the pool:
3297       assert rld != null: "seg=" + info.info.name;
3298       final Bits currentLiveDocs = rld.getLiveDocs();
3299       final Map<String,DocValuesFieldUpdates> mergingFieldUpdates = rld.getMergingFieldUpdates();
3300       final String[] mergingFields;
3301       final DocValuesFieldUpdates[] dvFieldUpdates;
3302       final DocValuesFieldUpdates.Iterator[] updatesIters;
3303       if (mergingFieldUpdates.isEmpty()) {
3304         mergingFields = null;
3305         updatesIters = null;
3306         dvFieldUpdates = null;
3307       } else {
3308         mergingFields = new String[mergingFieldUpdates.size()];
3309         dvFieldUpdates = new DocValuesFieldUpdates[mergingFieldUpdates.size()];
3310         updatesIters = new DocValuesFieldUpdates.Iterator[mergingFieldUpdates.size()];
3311         int idx = 0;
3312         for (Entry<String,DocValuesFieldUpdates> e : mergingFieldUpdates.entrySet()) {
3313           String field = e.getKey();
3314           DocValuesFieldUpdates updates = e.getValue();
3315           mergingFields[idx] = field;
3316           dvFieldUpdates[idx] = mergedDVUpdates.getUpdates(field, updates.type);
3317           if (dvFieldUpdates[idx] == null) {
3318             dvFieldUpdates[idx] = mergedDVUpdates.newUpdates(field, updates.type, mergeState.segmentInfo.maxDoc());
3319           }
3320           updatesIters[idx] = updates.iterator();
3321           updatesIters[idx].nextDoc(); // advance to first update doc
3322           ++idx;
3323         }
3324       }
3325 //      System.out.println("[" + Thread.currentThread().getName() + "] IW.commitMergedDeletes: info=" + info + ", mergingUpdates=" + mergingUpdates);
3326 
3327       if (prevLiveDocs != null) {
3328 
3329         // If we had deletions on starting the merge we must
3330         // still have deletions now:
3331         assert currentLiveDocs != null;
3332         assert prevLiveDocs.length() == maxDoc;
3333         assert currentLiveDocs.length() == maxDoc;
3334 
3335         // There were deletes on this segment when the merge
3336         // started.  The merge has collapsed away those
3337         // deletes, but, if new deletes were flushed since
3338         // the merge started, we must now carefully keep any
3339         // newly flushed deletes but mapping them to the new
3340         // docIDs.
3341 
3342         // Since we copy-on-write, if any new deletes were
3343         // applied after merging has started, we can just
3344         // check if the before/after liveDocs have changed.
3345         // If so, we must carefully merge the liveDocs one
3346         // doc at a time:
3347         if (currentLiveDocs != prevLiveDocs) {
3348           // This means this segment received new deletes
3349           // since we started the merge, so we
3350           // must merge them:
3351           for (int j = 0; j < maxDoc; j++) {
3352             if (!prevLiveDocs.get(j)) {
3353               assert !currentLiveDocs.get(j);
3354             } else {
3355               if (!currentLiveDocs.get(j)) {
3356                 if (holder.mergedDeletesAndUpdates == null || !holder.initializedWritableLiveDocs) {
3357                   holder.init(readerPool, merge, mergeState, true);
3358                 }
3359                 holder.mergedDeletesAndUpdates.delete(holder.docMap.map(docUpto));
3360                 if (mergingFields != null) { // advance all iters beyond the deleted document
3361                   skipDeletedDoc(updatesIters, j);
3362                 }
3363               } else if (mergingFields != null) {
3364                 maybeApplyMergedDVUpdates(merge, mergeState, docUpto, holder, mergingFields, dvFieldUpdates, updatesIters, j);
3365               }
3366               docUpto++;
3367             }
3368           }
3369         } else if (mergingFields != null) {
3370           // need to check each non-deleted document if it has any updates
3371           for (int j = 0; j < maxDoc; j++) {
3372             if (prevLiveDocs.get(j)) {
3373               // document isn't deleted, check if any of the fields have an update to it
3374               maybeApplyMergedDVUpdates(merge, mergeState, docUpto, holder, mergingFields, dvFieldUpdates, updatesIters, j);
3375               // advance docUpto for every non-deleted document
3376               docUpto++;
3377             } else {
3378               // advance all iters beyond the deleted document
3379               skipDeletedDoc(updatesIters, j);
3380             }
3381           }
3382         } else {
3383           docUpto += info.info.maxDoc() - info.getDelCount() - rld.getPendingDeleteCount();
3384         }
3385       } else if (currentLiveDocs != null) {
3386         assert currentLiveDocs.length() == maxDoc;
3387         // This segment had no deletes before but now it
3388         // does:
3389         for (int j = 0; j < maxDoc; j++) {
3390           if (!currentLiveDocs.get(j)) {
3391             if (holder.mergedDeletesAndUpdates == null || !holder.initializedWritableLiveDocs) {
3392               holder.init(readerPool, merge, mergeState, true);
3393             }
3394             holder.mergedDeletesAndUpdates.delete(holder.docMap.map(docUpto));
3395             if (mergingFields != null) { // advance all iters beyond the deleted document
3396               skipDeletedDoc(updatesIters, j);
3397             }
3398           } else if (mergingFields != null) {
3399             maybeApplyMergedDVUpdates(merge, mergeState, docUpto, holder, mergingFields, dvFieldUpdates, updatesIters, j);
3400           }
3401           docUpto++;
3402         }
3403       } else if (mergingFields != null) {
3404         // no deletions before or after, but there were updates
3405         for (int j = 0; j < maxDoc; j++) {
3406           maybeApplyMergedDVUpdates(merge, mergeState, docUpto, holder, mergingFields, dvFieldUpdates, updatesIters, j);
3407           // advance docUpto for every non-deleted document
3408           docUpto++;
3409         }
3410       } else {
3411         // No deletes or updates before or after
3412         docUpto += info.info.maxDoc();
3413       }
3414     }
3415 
3416     assert docUpto == merge.info.info.maxDoc();
3417 
3418     if (mergedDVUpdates.any()) {
3419 //      System.out.println("[" + Thread.currentThread().getName() + "] IW.commitMergedDeletes: mergedDeletes.info=" + mergedDeletes.info + ", mergedFieldUpdates=" + mergedFieldUpdates);
3420       boolean success = false;
3421       try {
3422         // if any error occurs while writing the field updates we should release
3423         // the info, otherwise it stays in the pool but is considered not "live"
3424         // which later causes false exceptions in pool.dropAll().
3425         // NOTE: currently this is the only place which throws a true
3426         // IOException. If this ever changes, we need to extend that try/finally
3427         // block to the rest of the method too.
3428         holder.mergedDeletesAndUpdates.writeFieldUpdates(directory, mergedDVUpdates);
3429         success = true;
3430       } finally {
3431         if (!success) {
3432           holder.mergedDeletesAndUpdates.dropChanges();
3433           readerPool.drop(merge.info);
3434         }
3435       }
3436     }
3437     
3438     if (infoStream.isEnabled("IW")) {
3439       if (holder.mergedDeletesAndUpdates == null) {
3440         infoStream.message("IW", "no new deletes or field updates since merge started");
3441       } else {
3442         String msg = holder.mergedDeletesAndUpdates.getPendingDeleteCount() + " new deletes";
3443         if (mergedDVUpdates.any()) {
3444           msg += " and " + mergedDVUpdates.size() + " new field updates";
3445         }
3446         msg += " since merge started";
3447         infoStream.message("IW", msg);
3448       }
3449     }
3450 
3451     merge.info.setBufferedDeletesGen(minGen);
3452 
3453     return holder.mergedDeletesAndUpdates;
3454   }
3455 
3456   synchronized private boolean commitMerge(MergePolicy.OneMerge merge, MergeState mergeState) throws IOException {
3457 
3458     testPoint("startCommitMerge");
3459 
3460     if (tragedy != null) {
3461       throw new IllegalStateException("this writer hit an unrecoverable error; cannot complete merge", tragedy);
3462     }
3463 
3464     if (infoStream.isEnabled("IW")) {
3465       infoStream.message("IW", "commitMerge: " + segString(merge.segments) + " index=" + segString());
3466     }
3467 
3468     assert merge.registerDone;
3469 
3470     // If merge was explicitly aborted, or, if rollback() or
3471     // rollbackTransaction() had been called since our merge
3472     // started (which results in an unqualified
3473     // deleter.refresh() call that will remove any index
3474     // file that current segments does not reference), we
3475     // abort this merge
3476     if (merge.rateLimiter.getAbort()) {
3477       if (infoStream.isEnabled("IW")) {
3478         infoStream.message("IW", "commitMerge: skip: it was aborted");
3479       }
3480       // In case we opened and pooled a reader for this
3481       // segment, drop it now.  This ensures that we close
3482       // the reader before trying to delete any of its
3483       // files.  This is not a very big deal, since this
3484       // reader will never be used by any NRT reader, and
3485       // another thread is currently running close(false)
3486       // so it will be dropped shortly anyway, but not
3487       // doing this  makes  MockDirWrapper angry in
3488       // TestNRTThreads (LUCENE-5434):
3489       readerPool.drop(merge.info);
3490 
3491       // Safe: these files must exist:
3492       deleteNewFiles(merge.info.files());
3493       return false;
3494     }
3495 
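    // Carry over any deletes and doc-values updates that arrived while the
    // merge was running (see commitMergedDeletesAndUpdates above); a merged
    // segment with no docs has nothing to carry over: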
3496     final ReadersAndUpdates mergedUpdates = merge.info.info.maxDoc() == 0 ? null : commitMergedDeletesAndUpdates(merge, mergeState);
3497 //    System.out.println("[" + Thread.currentThread().getName() + "] IW.commitMerge: mergedDeletes=" + mergedDeletes);
3498 
3499     // If the doc store we are using has been closed and
    // is now in compound format (but wasn't when we
3501     // started), then we will switch to the compound
3502     // format as well:
3503 
3504     assert !segmentInfos.contains(merge.info);
3505 
3506     final boolean allDeleted = merge.segments.size() == 0 ||
3507       merge.info.info.maxDoc() == 0 ||
3508       (mergedUpdates != null &&
3509        mergedUpdates.getPendingDeleteCount() == merge.info.info.maxDoc());
3510 
3511     if (infoStream.isEnabled("IW")) {
3512       if (allDeleted) {
3513         infoStream.message("IW", "merged segment " + merge.info + " is 100% deleted" +  (keepFullyDeletedSegments ? "" : "; skipping insert"));
3514       }
3515     }
3516 
3517     final boolean dropSegment = allDeleted && !keepFullyDeletedSegments;
3518 
3519     // If we merged no segments then we better be dropping
3520     // the new segment:
3521     assert merge.segments.size() > 0 || dropSegment;
3522 
3523     assert merge.info.info.maxDoc() != 0 || keepFullyDeletedSegments || dropSegment;
3524 
3525     if (mergedUpdates != null) {
3526       boolean success = false;
3527       try {
3528         if (dropSegment) {
3529           mergedUpdates.dropChanges();
3530         }
3531         // Pass false for assertInfoLive because the merged
3532         // segment is not yet live (only below do we commit it
3533         // to the segmentInfos):
3534         readerPool.release(mergedUpdates, false);
3535         success = true;
3536       } finally {
3537         if (!success) {
3538           mergedUpdates.dropChanges();
3539           readerPool.drop(merge.info);
3540         }
3541       }
3542     }
3543 
3544     // Must do this after readerPool.release, in case an
3545     // exception is hit e.g. writing the live docs for the
3546     // merge segment, in which case we need to abort the
3547     // merge:
3548     segmentInfos.applyMergeChanges(merge, dropSegment);
3549 
3550     // Now deduct the deleted docs that we just reclaimed from this
3551     // merge:
3552     int delDocCount = merge.totalMaxDoc - merge.info.info.maxDoc();
3553     assert delDocCount >= 0;
3554     pendingNumDocs.addAndGet(-delDocCount);
3555 
3556     if (dropSegment) {
3557       assert !segmentInfos.contains(merge.info);
3558       readerPool.drop(merge.info);
3559       // Safe: these files must exist
3560       deleteNewFiles(merge.info.files());
3561     }
3562 
3563     boolean success = false;
3564     try {
3565       // Must close before checkpoint, otherwise IFD won't be
3566       // able to delete the held-open files from the merge
3567       // readers:
3568       closeMergeReaders(merge, false);
3569       success = true;
3570     } finally {
3571       // Must note the change to segmentInfos so any commits
3572       // in-flight don't lose it (IFD will incRef/protect the
3573       // new files we created):
3574       if (success) {
3575         checkpoint();
3576       } else {
3577         try {
3578           checkpoint();
3579         } catch (Throwable t) {
3580           // Ignore so we keep throwing original exception.
3581         }
3582       }
3583     }
3584 
3585     deleter.deletePendingFiles();
3586 
3587     if (infoStream.isEnabled("IW")) {
3588       infoStream.message("IW", "after commitMerge: " + segString());
3589     }
3590 
3591     if (merge.maxNumSegments != -1 && !dropSegment) {
3592       // cascade the forceMerge:
3593       if (!segmentsToMerge.containsKey(merge.info)) {
3594         segmentsToMerge.put(merge.info, Boolean.FALSE);
3595       }
3596     }
3597 
3598     return true;
3599   }
3600 
3601   final private void handleMergeException(Throwable t, MergePolicy.OneMerge merge) throws IOException {
3602 
3603     if (infoStream.isEnabled("IW")) {
3604       infoStream.message("IW", "handleMergeException: merge=" + segString(merge.segments) + " exc=" + t);
3605     }
3606 
3607     // Set the exception on the merge, so if
3608     // forceMerge is waiting on us it sees the root
3609     // cause exception:
3610     merge.setException(t);
3611     addMergeException(merge);
3612 
3613     if (t instanceof MergePolicy.MergeAbortedException) {
3614       // We can ignore this exception (it happens when
3615       // deleteAll or rollback is called), unless the
3616       // merge involves segments from external directories,
3617       // in which case we must throw it so, for example, the
3618       // rollbackTransaction code in addIndexes* is
3619       // executed.
3620       if (merge.isExternal) {
3621         throw (MergePolicy.MergeAbortedException) t;
3622       }
3623     } else {
3624       IOUtils.reThrow(t);
3625     }
3626   }
3627 
3628   /**
3629    * Merges the indicated segments, replacing them in the stack with a
3630    * single segment.
3631    * 
3632    * @lucene.experimental
3633    */
3634   public void merge(MergePolicy.OneMerge merge) throws IOException {
3635 
3636     boolean success = false;
3637 
3638     rateLimiters.set(merge.rateLimiter);
3639 
3640     final long t0 = System.currentTimeMillis();
3641 
3642     final MergePolicy mergePolicy = config.getMergePolicy();
3643     try {
3644       try {
3645         try {
3646           mergeInit(merge);
3647           //if (merge.info != null) {
3648           //System.out.println("MERGE: " + merge.info.info.name);
3649           //}
3650 
3651           if (infoStream.isEnabled("IW")) {
3652             infoStream.message("IW", "now merge\n  merge=" + segString(merge.segments) + "\n  index=" + segString());
3653           }
3654 
3655           mergeMiddle(merge, mergePolicy);
3656           mergeSuccess(merge);
3657           success = true;
3658         } catch (Throwable t) {
3659           handleMergeException(t, merge);
3660         }
3661       } finally {
3662         synchronized(this) {
3663 
3664           mergeFinish(merge);
3665 
3666           if (success == false) {
3667             if (infoStream.isEnabled("IW")) {
3668               infoStream.message("IW", "hit exception during merge");
3669             }
3670           } else if (merge.rateLimiter.getAbort() == false && (merge.maxNumSegments != -1 || (!closed && !closing))) {
3671             // This merge (and, generally, any change to the
3672             // segments) may now enable new merges, so we call
3673             // merge policy & update pending merges.
3674             updatePendingMerges(mergePolicy, MergeTrigger.MERGE_FINISHED, merge.maxNumSegments);
3675           }
3676         }
3677       }
3678     } catch (Throwable t) {
3679       // Important that tragicEvent is called after mergeFinish, else we hang
3680       // waiting for our merge thread to be removed from runningMerges:
3681       tragicEvent(t, "merge");
3682     }
3683 
3684     if (merge.info != null && merge.rateLimiter.getAbort() == false) {
3685       if (infoStream.isEnabled("IW")) {
3686         infoStream.message("IW", "merge time " + (System.currentTimeMillis()-t0) + " msec for " + merge.info.info.maxDoc() + " docs");
3687       }
3688     }
3689   }
3690 
3691   /** Hook that's called when the specified merge is complete. */
3692   void mergeSuccess(MergePolicy.OneMerge merge) {
3693   }
3694 
3695   /** Checks whether this merge involves any segments
3696    *  already participating in a merge.  If not, this merge
3697    *  is "registered", meaning we record that its segments
3698    *  are now participating in a merge, and true is
3699    *  returned.  Else (the merge conflicts) false is
3700    *  returned. */
3701   final synchronized boolean registerMerge(MergePolicy.OneMerge merge) throws IOException {
3702 
3703     if (merge.registerDone) {
3704       return true;
3705     }
3706     assert merge.segments.size() > 0;
3707 
3708     if (stopMerges) {
3709       merge.rateLimiter.setAbort();
3710       throw new MergePolicy.MergeAbortedException("merge is aborted: " + segString(merge.segments));
3711     }
3712 
3713     boolean isExternal = false;
3714     for(SegmentCommitInfo info : merge.segments) {
3715       if (mergingSegments.contains(info)) {
3716         if (infoStream.isEnabled("IW")) {
3717           infoStream.message("IW", "reject merge " + segString(merge.segments) + ": segment " + segString(info) + " is already marked for merge");
3718         }
3719         return false;
3720       }
3721       if (!segmentInfos.contains(info)) {
3722         if (infoStream.isEnabled("IW")) {
3723           infoStream.message("IW", "reject merge " + segString(merge.segments) + ": segment " + segString(info) + " does not exist in live infos");
3724         }
3725         return false;
3726       }
3727       if (info.info.dir != directoryOrig) {
3728         isExternal = true;
3729       }
3730       if (segmentsToMerge.containsKey(info)) {
3731         merge.maxNumSegments = mergeMaxNumSegments;
3732       }
3733     }
3734 
3735     ensureValidMerge(merge);
3736 
3737     pendingMerges.add(merge);
3738 
3739     if (infoStream.isEnabled("IW")) {
3740       infoStream.message("IW", "add merge to pendingMerges: " + segString(merge.segments) + " [total " + pendingMerges.size() + " pending]");
3741     }
3742 
3743     merge.mergeGen = mergeGen;
3744     merge.isExternal = isExternal;
3745 
3746     // OK it does not conflict; now record that this merge
3747     // is running (while synchronized) to avoid race
    // condition where two conflicting merges from different
    // threads start at the same time:
3750     if (infoStream.isEnabled("IW")) {
3751       StringBuilder builder = new StringBuilder("registerMerge merging= [");
3752       for (SegmentCommitInfo info : mergingSegments) {
3753         builder.append(info.info.name).append(", ");  
3754       }
3755       builder.append("]");
      // don't call mergingSegments.toString() here: it could lead to a
      // ConcurrentModificationException, since merge updates the segments' FieldInfos
3758       if (infoStream.isEnabled("IW")) {
3759         infoStream.message("IW", builder.toString());  
3760       }
3761     }
3762     for(SegmentCommitInfo info : merge.segments) {
3763       if (infoStream.isEnabled("IW")) {
3764         infoStream.message("IW", "registerMerge info=" + segString(info));
3765       }
3766       mergingSegments.add(info);
3767     }
3768 
3769     assert merge.estimatedMergeBytes == 0;
3770     assert merge.totalMergeBytes == 0;
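    // Estimate the merge's size by prorating each segment's size by its
    // fraction of live (non-deleted) documents; also track the total raw size: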
3771     for(SegmentCommitInfo info : merge.segments) {
3772       if (info.info.maxDoc() > 0) {
3773         final int delCount = numDeletedDocs(info);
3774         assert delCount <= info.info.maxDoc();
3775         final double delRatio = ((double) delCount)/info.info.maxDoc();
3776         merge.estimatedMergeBytes += info.sizeInBytes() * (1.0 - delRatio);
3777         merge.totalMergeBytes += info.sizeInBytes();
3778       }
3779     }
3780 
3781     // Merge is now registered
3782     merge.registerDone = true;
3783 
3784     return true;
3785   }
3786 
  /** Does initial setup for a merge, which is fast but holds
   *  the synchronized lock on the IndexWriter instance. */
3789   final synchronized void mergeInit(MergePolicy.OneMerge merge) throws IOException {
3790     boolean success = false;
3791     try {
3792       _mergeInit(merge);
3793       success = true;
3794     } finally {
3795       if (!success) {
3796         if (infoStream.isEnabled("IW")) {
3797           infoStream.message("IW", "hit exception in mergeInit");
3798         }
3799         mergeFinish(merge);
3800       }
3801     }
3802   }
3803 
3804   synchronized private void _mergeInit(MergePolicy.OneMerge merge) throws IOException {
3805 
3806     testPoint("startMergeInit");
3807 
3808     assert merge.registerDone;
3809     assert merge.maxNumSegments == -1 || merge.maxNumSegments > 0;
3810 
3811     if (tragedy != null) {
3812       throw new IllegalStateException("this writer hit an unrecoverable error; cannot merge", tragedy);
3813     }
3814 
3815     if (merge.info != null) {
3816       // mergeInit already done
3817       return;
3818     }
3819 
3820     if (merge.rateLimiter.getAbort()) {
3821       return;
3822     }
3823 
3824     // TODO: in the non-pool'd case this is somewhat
3825     // wasteful, because we open these readers, close them,
3826     // and then open them again for merging.  Maybe  we
3827     // could pre-pool them somehow in that case...
3828 
3829     if (infoStream.isEnabled("IW")) {
3830       infoStream.message("IW", "now apply deletes for " + merge.segments.size() + " merging segments");
3831     }
3832 
3833     // Lock order: IW -> BD
3834     final BufferedUpdatesStream.ApplyDeletesResult result = bufferedUpdatesStream.applyDeletesAndUpdates(readerPool, merge.segments);
3835     
3836     if (result.anyDeletes) {
3837       checkpoint();
3838     }
3839 
3840     if (!keepFullyDeletedSegments && result.allDeleted != null) {
3841       if (infoStream.isEnabled("IW")) {
3842         infoStream.message("IW", "drop 100% deleted segments: " + result.allDeleted);
3843       }
3844       for(SegmentCommitInfo info : result.allDeleted) {
3845         segmentInfos.remove(info);
3846         pendingNumDocs.addAndGet(-info.info.maxDoc());
3847         if (merge.segments.contains(info)) {
3848           mergingSegments.remove(info);
3849           merge.segments.remove(info);
3850         }
3851         readerPool.drop(info);
3852       }
3853       checkpoint();
3854     }
3855 
3856     // Bind a new segment name here so even with
3857     // ConcurrentMergePolicy we keep deterministic segment
3858     // names.
3859     final String mergeSegmentName = newSegmentName();
3860     SegmentInfo si = new SegmentInfo(directoryOrig, Version.LATEST, mergeSegmentName, -1, false, codec, Collections.<String,String>emptyMap(), StringHelper.randomId(), new HashMap<String,String>());
3861     Map<String,String> details = new HashMap<>();
3862     details.put("mergeMaxNumSegments", "" + merge.maxNumSegments);
3863     details.put("mergeFactor", Integer.toString(merge.segments.size()));
3864     setDiagnostics(si, SOURCE_MERGE, details);
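    // The new merged segment starts with no deletes and default (-1) delete,
    // fieldInfos and docValues generations: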
3865     merge.setMergeInfo(new SegmentCommitInfo(si, 0, -1L, -1L, -1L));
3866 
3867 //    System.out.println("[" + Thread.currentThread().getName() + "] IW._mergeInit: " + segString(merge.segments) + " into " + si);
3868 
3869     // Lock order: IW -> BD
3870     bufferedUpdatesStream.prune(segmentInfos);
3871 
3872     if (infoStream.isEnabled("IW")) {
3873       infoStream.message("IW", "merge seg=" + merge.info.info.name + " " + segString(merge.segments));
3874     }
3875   }
3876 
3877   static void setDiagnostics(SegmentInfo info, String source) {
3878     setDiagnostics(info, source, null);
3879   }
3880 
3881   private static void setDiagnostics(SegmentInfo info, String source, Map<String,String> details) {
3882     Map<String,String> diagnostics = new HashMap<>();
3883     diagnostics.put("source", source);
3884     diagnostics.put("lucene.version", Version.LATEST.toString());
3885     diagnostics.put("os", Constants.OS_NAME);
3886     diagnostics.put("os.arch", Constants.OS_ARCH);
3887     diagnostics.put("os.version", Constants.OS_VERSION);
3888     diagnostics.put("java.version", Constants.JAVA_VERSION);
3889     diagnostics.put("java.vendor", Constants.JAVA_VENDOR);
3890     // On IBM J9 JVM this is better than java.version which is just 1.7.0 (no update level):
3891     diagnostics.put("java.runtime.version", System.getProperty("java.runtime.version", "undefined"));
3892     // Hotspot version, e.g. 2.8 for J9:
3893     diagnostics.put("java.vm.version", System.getProperty("java.vm.version", "undefined"));
3894     diagnostics.put("timestamp", Long.toString(new Date().getTime()));
3895     if (details != null) {
3896       diagnostics.putAll(details);
3897     }
3898     info.setDiagnostics(diagnostics);
3899   }
3900 
  /** Does finishing work for a merge, which is fast but holds
   *  the synchronized lock on the IndexWriter instance. */
3903   final synchronized void mergeFinish(MergePolicy.OneMerge merge) {
3904 
3905     // forceMerge, addIndexes or waitForMerges may be waiting
3906     // on merges to finish.
3907     notifyAll();
3908 
3909     // It's possible we are called twice, eg if there was an
3910     // exception inside mergeInit
3911     if (merge.registerDone) {
3912       final List<SegmentCommitInfo> sourceSegments = merge.segments;
3913       for (SegmentCommitInfo info : sourceSegments) {
3914         mergingSegments.remove(info);
3915       }
3916       merge.registerDone = false;
3917     }
3918 
3919     runningMerges.remove(merge);
3920   }
3921 
3922   private final synchronized void closeMergeReaders(MergePolicy.OneMerge merge, boolean suppressExceptions) throws IOException {
3923     final int numSegments = merge.readers.size();
3924     Throwable th = null;
3925 
3926     boolean drop = !suppressExceptions;
3927     
3928     for (int i = 0; i < numSegments; i++) {
3929       final SegmentReader sr = merge.readers.get(i);
3930       if (sr != null) {
3931         try {
3932           final ReadersAndUpdates rld = readerPool.get(sr.getSegmentInfo(), false);
3933           // We still hold a ref so it should not have been removed:
3934           assert rld != null;
3935           if (drop) {
3936             rld.dropChanges();
3937           } else {
3938             rld.dropMergingUpdates();
3939           }
3940           rld.release(sr);
3941           readerPool.release(rld);
3942           if (drop) {
3943             readerPool.drop(rld.info);
3944           }
3945         } catch (Throwable t) {
3946           if (th == null) {
3947             th = t;
3948           }
3949         }
3950         merge.readers.set(i, null);
3951       }
3952     }
3953 
3954     try {
3955       merge.mergeFinished();
3956     } catch (Throwable t) {
3957       if (th == null) {
3958         th = t;
3959       }
3960     }
3961     
    // If any error occurred, throw it.
3963     if (!suppressExceptions) {
3964       IOUtils.reThrow(th);
3965     }
3966   }
3967 
3968   /** Does the actual (time-consuming) work of the merge,
   *  but without holding the synchronized lock on the
   *  IndexWriter instance. */
3971   private int mergeMiddle(MergePolicy.OneMerge merge, MergePolicy mergePolicy) throws IOException {
3972 
3973     merge.rateLimiter.checkAbort();
3974 
3975     List<SegmentCommitInfo> sourceSegments = merge.segments;
3976     
3977     IOContext context = new IOContext(merge.getStoreMergeInfo());
3978 
3979     final TrackingDirectoryWrapper dirWrapper = new TrackingDirectoryWrapper(mergeDirectory);
3980 
3981     if (infoStream.isEnabled("IW")) {
3982       infoStream.message("IW", "merging " + segString(merge.segments));
3983     }
3984 
3985     merge.readers = new ArrayList<>(sourceSegments.size());
3986 
3987     // This is try/finally to make sure merger's readers are
3988     // closed:
3989     boolean success = false;
3990     try {
3991       int segUpto = 0;
3992       while(segUpto < sourceSegments.size()) {
3993 
3994         final SegmentCommitInfo info = sourceSegments.get(segUpto);
3995 
3996         // Hold onto the "live" reader; we will use this to
3997         // commit merged deletes
3998         final ReadersAndUpdates rld = readerPool.get(info, true);
3999 
4000         // Carefully pull the most recent live docs and reader
4001         SegmentReader reader;
4002         final Bits liveDocs;
4003         final int delCount;
4004 
4005         synchronized (this) {
4006           // Must sync to ensure BufferedDeletesStream cannot change liveDocs,
4007           // pendingDeleteCount and field updates while we pull a copy:
4008           reader = rld.getReaderForMerge(context);
4009           liveDocs = rld.getReadOnlyLiveDocs();
4010           delCount = rld.getPendingDeleteCount() + info.getDelCount();
4011 
4012           assert reader != null;
4013           assert rld.verifyDocCounts();
4014 
4015           if (infoStream.isEnabled("IW")) {
4016             if (rld.getPendingDeleteCount() != 0) {
4017               infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount() + " pendingDelCount=" + rld.getPendingDeleteCount());
4018             } else if (info.getDelCount() != 0) {
4019               infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount());
4020             } else {
4021               infoStream.message("IW", "seg=" + segString(info) + " no deletes");
4022             }
4023           }
4024         }
4025 
4026         // Deletes might have happened after we pulled the merge reader and
4027         // before we got a read-only copy of the segment's actual live docs
4028         // (taking pending deletes into account). In that case we need to
4029         // make a new reader with updated live docs and del count.
4030         if (reader.numDeletedDocs() != delCount) {
4031           // fix the reader's live docs and del count
4032           assert delCount > reader.numDeletedDocs(); // beware of zombies
4033 
4034           SegmentReader newReader;
4035 
4036           synchronized (this) {
4037             // We must also sync on IW here, because another thread could be writing
4038             // new DV updates / remove old gen field infos files causing FNFE:
4039             newReader = new SegmentReader(info, reader, liveDocs, info.info.maxDoc() - delCount);
4040           }
4041 
4042           boolean released = false;
4043           try {
4044             rld.release(reader);
4045             released = true;
4046           } finally {
4047             if (!released) {
4048               newReader.decRef();
4049             }
4050           }
4051 
4052           reader = newReader;
4053         }
4054 
4055         merge.readers.add(reader);
4056         assert delCount <= info.info.maxDoc(): "delCount=" + delCount + " info.maxDoc=" + info.info.maxDoc() + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount();
4057         segUpto++;
4058       }
4059 
4060 //      System.out.println("[" + Thread.currentThread().getName() + "] IW.mergeMiddle: merging " + merge.getMergeReaders());
4061       
4062       // we pass merge.getMergeReaders() instead of merge.readers to allow the
4063       // OneMerge to return a view over the actual segments to merge
4064       final SegmentMerger merger = new SegmentMerger(merge.getMergeReaders(),
4065                                                      merge.info.info, infoStream, dirWrapper,
4066                                                      globalFieldNumberMap, 
4067                                                      context);
4068 
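           // Bail out now (throws MergeAbortedException) if this merge was aborted
           // while we were pulling the readers: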
4069       merge.rateLimiter.checkAbort();
4070 
4071       merge.mergeStartNS = System.nanoTime();
4072 
4073       // This is where all the work happens:
4074       if (merger.shouldMerge()) {
4075         merger.merge();
4076       }
4077 
4078       MergeState mergeState = merger.mergeState;
4079       assert mergeState.segmentInfo == merge.info.info;
4080       merge.info.info.setFiles(new HashSet<>(dirWrapper.getCreatedFiles()));
4081 
4082       if (infoStream.isEnabled("IW")) {
4083         if (merger.shouldMerge()) {
4084           long t1 = System.nanoTime();
4085           double sec = (t1-merge.mergeStartNS)/1000000000.;
4086           double segmentMB = (merge.info.sizeInBytes()/1024./1024.);
4087           double stoppedSec = merge.rateLimiter.getTotalStoppedNS()/1000000000.;
4088           double throttleSec = merge.rateLimiter.getTotalPausedNS()/1000000000.;
4089           infoStream.message("IW", "merge codec=" + codec + " maxDoc=" + merge.info.info.maxDoc() + "; merged segment has " +
4090                              (mergeState.mergeFieldInfos.hasVectors() ? "vectors" : "no vectors") + "; " +
4091                              (mergeState.mergeFieldInfos.hasNorms() ? "norms" : "no norms") + "; " + 
4092                              (mergeState.mergeFieldInfos.hasDocValues() ? "docValues" : "no docValues") + "; " + 
4093                              (mergeState.mergeFieldInfos.hasProx() ? "prox" : "no prox") + "; " + 
4094                              (mergeState.mergeFieldInfos.hasFreq() ? "freqs" : "no freqs") + "; " +
4095                              String.format(Locale.ROOT,
4096                                            "%.1f sec (%.1f sec stopped, %.1f sec paused) to merge segment [%.2f MB, %.2f MB/sec]",
4097                                            sec,
4098                                            stoppedSec,
4099                                            throttleSec,
4100                                            segmentMB,
4101                                            segmentMB / sec));
4102         } else {
4103           infoStream.message("IW", "skip merging fully deleted segments");
4104         }
4105       }
4106 
4107       if (merger.shouldMerge() == false) {
4108         // Merge would produce a 0-doc segment, so we do nothing except commit the merge to remove all the 0-doc segments that we "merged":
4109         assert merge.info.info.maxDoc() == 0;
4110         commitMerge(merge, mergeState);
4111         return 0;
4112       }
4113 
4114       assert merge.info.info.maxDoc() > 0;
4115 
4116       // Very important to do this before opening the reader
4117       // because codec must know if prox was written for
4118       // this segment:
4119       //System.out.println("merger set hasProx=" + merger.hasProx() + " seg=" + merge.info.name);
4120       boolean useCompoundFile;
4121       synchronized (this) { // Guard segmentInfos
4122         useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, merge.info, this);
4123       }
4124 
4125       if (useCompoundFile) {
4126         success = false;
4127 
4128         Collection<String> filesToRemove = merge.info.files();
4129         TrackingDirectoryWrapper trackingCFSDir = new TrackingDirectoryWrapper(mergeDirectory);
4130         try {
4131           createCompoundFile(infoStream, trackingCFSDir, merge.info.info, context);
4132           success = true;
4133         } catch (Throwable t) {
4134           synchronized(this) {
4135             if (merge.rateLimiter.getAbort()) {
4136               // This can happen if rollback is called while we were building
4137               // our CFS -- fall through to logic below to remove the non-CFS
4138               // merged files:
4139               if (infoStream.isEnabled("IW")) {
4140                 infoStream.message("IW", "hit merge abort exception creating compound file during merge");
4141               }
4142               return 0;
4143             } else {
4144               handleMergeException(t, merge);
4145             }
4146           }
4147         } finally {
4148           if (success == false) {
4149             if (infoStream.isEnabled("IW")) {
4150               infoStream.message("IW", "hit exception creating compound file during merge");
4151             }
4152             // Safe: these files must exist
4153             deleteNewFiles(merge.info.files());
4154           }
4155         }
4156 
4157         // So that, if we hit exc in deleteNewFiles (next)
4158         // or in commitMerge (later), we close the
4159         // per-segment readers in the finally clause below:
4160         success = false;
4161 
4162         synchronized(this) {
4163 
4164           // delete new non cfs files directly: they were never
4165           // registered with IFD
4166           deleteNewFiles(filesToRemove);
4167 
4168           if (merge.rateLimiter.getAbort()) {
4169             if (infoStream.isEnabled("IW")) {
4170               infoStream.message("IW", "abort merge after building CFS");
4171             }
4172             // Safe: these files must exist
4173             deleteNewFiles(merge.info.files());
4174             return 0;
4175           }
4176         }
4177 
4178         merge.info.info.setUseCompoundFile(true);
4179       } else {
4180         // So that, if we hit exc in commitMerge (later),
4181         // we close the per-segment readers in the finally
4182         // clause below:
4183         success = false;
4184       }
4185 
4186       // Have codec write SegmentInfo.  Must do this after
4187       // creating CFS so that 1) .si isn't slurped into CFS,
4188       // and 2) .si reflects useCompoundFile=true change
4189       // above:
4190       boolean success2 = false;
4191       try {
4192         codec.segmentInfoFormat().write(directory, merge.info.info, context);
4193         success2 = true;
4194       } finally {
4195         if (!success2) {
4196           // Safe: these files must exist
4197           deleteNewFiles(merge.info.files());
4198         }
4199       }
4200 
4201       // TODO: ideally we would freeze merge.info here!!
4202       // because any changes after writing the .si will be
4203       // lost... 
4204 
4205       if (infoStream.isEnabled("IW")) {
4206         infoStream.message("IW", String.format(Locale.ROOT, "merged segment size=%.3f MB vs estimate=%.3f MB", merge.info.sizeInBytes()/1024./1024., merge.estimatedMergeBytes/1024/1024.));
4207       }
4208 
4209       final IndexReaderWarmer mergedSegmentWarmer = config.getMergedSegmentWarmer();
4210       if (poolReaders && mergedSegmentWarmer != null) {
4211         final ReadersAndUpdates rld = readerPool.get(merge.info, true);
4212         final SegmentReader sr = rld.getReader(IOContext.READ);
4213         try {
4214           mergedSegmentWarmer.warm(sr);
4215         } finally {
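               // Releasing the warmed reader back to the pool must happen under IW's lock: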
4216           synchronized(this) {
4217             rld.release(sr);
4218             readerPool.release(rld);
4219           }
4220         }
4221       }
4222 
4223       if (!commitMerge(merge, mergeState)) {
4224         // commitMerge will return false if this merge was
4225         // aborted
4226         return 0;
4227       }
4228 
4229       success = true;
4230 
4231     } finally {
4232       // Readers are already closed in commitMerge if we didn't hit
4233       // an exc:
4234       if (success == false) {
4235         closeMergeReaders(merge, true);
4236       }
4237     }
4238 
4239     return merge.info.info.maxDoc();
4240   }
4241 
4242   synchronized void addMergeException(MergePolicy.OneMerge merge) {
4243     assert merge.getException() != null;
4244     if (!mergeExceptions.contains(merge) && mergeGen == merge.mergeGen) {
4245       mergeExceptions.add(merge);
4246     }
4247   }
4248 
4249   // For test purposes.
4250   final int getBufferedDeleteTermsSize() {
4251     return docWriter.getBufferedDeleteTermsSize();
4252   }
4253 
4254   // For test purposes.
4255   final int getNumBufferedDeleteTerms() {
4256     return docWriter.getNumBufferedDeleteTerms();
4257   }
4258 
4259   // utility routines for tests
4260   synchronized SegmentCommitInfo newestSegment() {
4261     return segmentInfos.size() > 0 ? segmentInfos.info(segmentInfos.size()-1) : null;
4262   }
4263 
4264   /** Returns a string description of all segments, for
4265    *  debugging.
4266    *
4267    * @lucene.internal */
4268   synchronized String segString() {
4269     return segString(segmentInfos);
4270   }
4271 
4272   /** Returns a string description of the specified
4273    *  segments, for debugging.
4274    *
4275    * @lucene.internal */
4276   synchronized String segString(Iterable<SegmentCommitInfo> infos) {
4277     final StringBuilder buffer = new StringBuilder();
4278     for(final SegmentCommitInfo info : infos) {
4279       if (buffer.length() > 0) {
4280         buffer.append(' ');
4281       }
4282       buffer.append(segString(info));
4283     }
4284     return buffer.toString();
4285   }
4286 
4287   /** Returns a string description of the specified
4288    *  segment, for debugging.
4289    *
4290    * @lucene.internal */
4291   synchronized String segString(SegmentCommitInfo info) {
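         // The argument is the count of pending (in-memory, not yet committed) deletes
         // on top of the segment's already-committed delete count: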
4292     return info.toString(numDeletedDocs(info) - info.getDelCount());
4293   }
4294 
4295   private synchronized void doWait() {
4296     // NOTE: the callers of this method should in theory
4297     // be able to simply call wait(), but, as a defense
4298     // against thread timing hazards where notifyAll()
4299     // fails to be called, we wait for at most 1 second
4300     // and then return so the caller can check whether
4301     // the wait conditions are satisfied:
4302     try {
4303       wait(1000);
4304     } catch (InterruptedException ie) {
4305       throw new ThreadInterruptedException(ie);
4306     }
4307   }
4308 
4309   private boolean keepFullyDeletedSegments;
4310 
4311   /** Only for testing.
4312    *
4313    * @lucene.internal */
4314   void setKeepFullyDeletedSegments(boolean v) {
4315     keepFullyDeletedSegments = v;
4316   }
4317 
4318   boolean getKeepFullyDeletedSegments() {
4319     return keepFullyDeletedSegments;
4320   }
4321 
4322   // called only from assert
4323   private boolean filesExist(SegmentInfos toSync) throws IOException {
4324     
4325     Collection<String> files = toSync.files(false);
4326     for(final String fileName: files) {
4327       assert slowFileExists(directory, fileName): "file " + fileName + " does not exist; files=" + Arrays.toString(directory.listAll());
4328       // If this trips it means we are missing a call to
4329       // .checkpoint somewhere, because by the time we
4330       // are called, deleter should know about every
4331       // file referenced by the current head
4332       // segmentInfos:
4333       assert deleter.exists(fileName): "IndexFileDeleter doesn't know about file " + fileName;
4334     }
4335     return true;
4336   }
4337 
4338   // For infoStream output
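       // (replaces each incoming SegmentCommitInfo with the instance currently live in
       // segmentInfos, when present, so logging reflects up-to-date per-segment state)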
4339   synchronized SegmentInfos toLiveInfos(SegmentInfos sis) {
4340     final SegmentInfos newSIS = new SegmentInfos();
4341     final Map<SegmentCommitInfo,SegmentCommitInfo> liveSIS = new HashMap<>();
4342     for(SegmentCommitInfo info : segmentInfos) {
4343       liveSIS.put(info, info);
4344     }
4345     for(SegmentCommitInfo info : sis) {
4346       SegmentCommitInfo liveInfo = liveSIS.get(info);
4347       if (liveInfo != null) {
4348         info = liveInfo;
4349       }
4350       newSIS.add(info);
4351     }
4352 
4353     return newSIS;
4354   }
4355 
4356   /** Walk through all files referenced by the current
4357    *  segmentInfos and ask the Directory to sync each file,
4358    *  if it wasn't already.  If that succeeds, then we
4359    *  prepare a new segments_N file but do not fully commit
4360    *  it. */
4361   private void startCommit(final SegmentInfos toSync) throws IOException {
4362 
4363     testPoint("startStartCommit");
4364     assert pendingCommit == null;
4365 
4366     if (tragedy != null) {
4367       throw new IllegalStateException("this writer hit an unrecoverable error; cannot commit", tragedy);
4368     }
4369 
4370     try {
4371 
4372       if (infoStream.isEnabled("IW")) {
4373         infoStream.message("IW", "startCommit(): start");
4374       }
4375 
4376       synchronized(this) {
4377 
4378         if (lastCommitChangeCount > changeCount.get()) {
4379           throw new IllegalStateException("lastCommitChangeCount=" + lastCommitChangeCount + ",changeCount=" + changeCount);
4380         }
4381 
4382         if (pendingCommitChangeCount == lastCommitChangeCount) {
4383           if (infoStream.isEnabled("IW")) {
4384             infoStream.message("IW", "  skip startCommit(): no changes pending");
4385           }
4386           try {
4387             deleter.decRef(filesToCommit);
4388           } finally {
4389             filesToCommit = null;
4390           }
4391           return;
4392         }
4393 
4394         if (infoStream.isEnabled("IW")) {
4395           infoStream.message("IW", "startCommit index=" + segString(toLiveInfos(toSync)) + " changeCount=" + changeCount);
4396         }
4397 
4398         assert filesExist(toSync);
4399       }
4400 
4401       testPoint("midStartCommit");
4402 
4403       boolean pendingCommitSet = false;
4404 
4405       try {
4406 
4407         testPoint("midStartCommit2");
4408 
4409         synchronized(this) {
4410 
4411           assert pendingCommit == null;
4412 
4413           assert segmentInfos.getGeneration() == toSync.getGeneration();
4414 
4415           // Exception here means nothing is prepared
4416           // (this method unwinds everything it did on
4417           // an exception)
4418           toSync.prepareCommit(directory);
4419           if (infoStream.isEnabled("IW")) {
4420             infoStream.message("IW", "startCommit: wrote pending segments file \"" + IndexFileNames.fileNameFromGeneration(IndexFileNames.PENDING_SEGMENTS, "", toSync.getGeneration()) + "\"");
4421           }
4422 
4423           //System.out.println("DONE prepareCommit");
4424 
4425           pendingCommitSet = true;
4426           pendingCommit = toSync;
4427         }
4428 
4429         // This call can take a long time -- 10s of seconds
4430         // or more.  We do it without syncing on this:
4431         boolean success = false;
4432         final Collection<String> filesToSync;
4433         try {
4434           filesToSync = toSync.files(false);
4435           directory.sync(filesToSync);
4436           success = true;
4437         } finally {
4438           if (!success) {
4439             pendingCommitSet = false;
4440             pendingCommit = null;
4441             toSync.rollbackCommit(directory);
4442           }
4443         }
4444 
4445         if (infoStream.isEnabled("IW")) {
4446           infoStream.message("IW", "done all syncs: " + filesToSync);
4447         }
4448 
4449         testPoint("midStartCommitSuccess");
4450 
4451       } finally {
4452         synchronized(this) {
4453           // Have our master segmentInfos record the
4454           // generations we just prepared.  We do this
4455           // on error or success so we don't
4456           // double-write a segments_N file.
4457           segmentInfos.updateGeneration(toSync);
4458 
4459           if (!pendingCommitSet) {
4460             if (infoStream.isEnabled("IW")) {
4461               infoStream.message("IW", "hit exception committing segments file");
4462             }
4463 
4464             // Hit exception
4465             deleter.decRefWhileHandlingException(filesToCommit);
4466             filesToCommit = null;
4467           }
4468         }
4469       }
4470     } catch (VirtualMachineError tragedy) {
4471       tragicEvent(tragedy, "startCommit");
4472     }
4473     testPoint("finishStartCommit");
4474   }
4475 
4476   /**
4477    * Returns <code>true</code> iff the index in the given directory is
4478    * currently locked.
4479    * @param directory the directory to check for a lock
4480    * @throws IOException if there is a low-level IO error
4481    * @deprecated Use of this method can only lead to race conditions. Try
4482    *             to actually obtain a lock instead.
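        *             For example (a minimal sketch; the try-with-resources block
        *             simply acquires and then releases the index write lock):
        *             <pre class="prettyprint">
        *             try (Lock lock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        *               // while this block runs, no other IndexWriter can open this index
        *             }
        *             </pre>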
4483    */
4484   @Deprecated
4485   public static boolean isLocked(Directory directory) throws IOException {
4486     try {
4487       directory.obtainLock(WRITE_LOCK_NAME).close();
4488       return false;
4489     } catch (LockObtainFailedException failed) {
4490       return true;
4491     }
4492   }
4493 
4494   /** If {@link DirectoryReader#open(IndexWriter,boolean)} has
4495    *  been called (ie, this writer is in near real-time
4496    *  mode), then after a merge completes, this class can be
4497    *  invoked to warm the reader on the newly merged
4498    *  segment, before the merge commits.  This is not
4499    *  required for near real-time search, but will reduce
4500    *  search latency on opening a new near real-time reader
4501    *  after a merge completes.
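        *
        *  <p>A minimal usage sketch (here {@code iwc} is assumed to be the
        *  {@link IndexWriterConfig} used to open the writer, and the field name
        *  "id" is purely illustrative):
        *
        *  <pre class="prettyprint">
        *  iwc.setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
        *    &#64;Override
        *    public void warm(LeafReader reader) throws IOException {
        *      // touch per-segment data structures so the first NRT search is fast
        *      reader.terms("id");
        *    }
        *  });
        *  </pre>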
4502    *
4503    * <p><b>NOTE</b>: warm is called before any deletes have
4504    * been carried over to the merged segment.
4505    *
4506    * @lucene.experimental */
4507   public static abstract class IndexReaderWarmer {
4508 
4509     /** Sole constructor. (For invocation by subclass 
4510      *  constructors, typically implicit.) */
4511     protected IndexReaderWarmer() {
4512     }
4513 
4514     /** Invoked on the {@link LeafReader} for the newly
4515      *  merged segment, before that segment is made visible
4516      *  to near-real-time readers. */
4517     public abstract void warm(LeafReader reader) throws IOException;
4518   }
4519 
4520   void tragicEvent(Throwable tragedy, String location) throws IOException {
4521 
4522     // unbox our internal AbortingException
4523     if (tragedy instanceof AbortingException) {
4524       tragedy = tragedy.getCause();
4525     }
4526 
4527     // This is not supposed to be tragic: IW is supposed to catch this and
4528     // ignore, because it means we asked the merge to abort:
4529     assert tragedy instanceof MergePolicy.MergeAbortedException == false;
4530 
4531     // We cannot hold IW's lock here else it can lead to deadlock:
4532     assert Thread.holdsLock(this) == false;
4533 
4534     // How can it be a tragedy when nothing happened?
4535     assert tragedy != null;
4536 
4537     if (infoStream.isEnabled("IW")) {
4538       infoStream.message("IW", "hit tragic " + tragedy.getClass().getSimpleName() + " inside " + location);
4539     }
4540 
4541     synchronized (this) {
4542       // It's possible you could have a really bad day
4543       if (this.tragedy != null) {
4544         // Another thread is already dealing / has dealt with the tragedy:
4545         IOUtils.reThrow(tragedy);
4546       }
4547 
4548       this.tragedy = tragedy;
4549     }
4550 
4551     // if we are already closed (e.g. called by rollback), this will be a no-op.
4552     if (shouldClose(false)) {
4553       rollbackInternal();
4554     }
4555 
4556     IOUtils.reThrow(tragedy);
4557   }
4558 
4559   /** If this {@code IndexWriter} was closed as a side-effect of a tragic exception,
4560    *  e.g. disk full while flushing a new segment, this returns the root cause exception.
4561    *  Otherwise (no tragic exception has occurred) it returns null. */
4562   public Throwable getTragicException() {
4563     return tragedy;
4564   }
4565 
4566   /** Returns {@code true} if this {@code IndexWriter} is still open. */
4567   public boolean isOpen() {
4568     return closing == false && closed == false;
4569   }
4570 
4571   // Used for testing.  Current points:
4572   //   startDoFlush
4573   //   startCommitMerge
4574   //   startStartCommit
4575   //   midStartCommit
4576   //   midStartCommit2
4577   //   midStartCommitSuccess
4578   //   finishStartCommit
4579   //   startCommitMergeDeletes
4580   //   startMergeInit
4581   //   DocumentsWriter.ThreadState.init start
4582   private final void testPoint(String message) {
4583     if (enableTestPoints) {
4584       assert infoStream.isEnabled("TP"); // don't enable unless you need them.
4585       infoStream.message("TP", message);
4586     }
4587   }
4588 
4589   synchronized boolean nrtIsCurrent(SegmentInfos infos) {
4590     //System.out.println("IW.nrtIsCurrent " + (infos.version == segmentInfos.version && !docWriter.anyChanges() && !bufferedDeletesStream.any()));
4591     ensureOpen();
4592     boolean isCurrent = infos.version == segmentInfos.version && !docWriter.anyChanges() && !bufferedUpdatesStream.any();
4593     if (infoStream.isEnabled("IW")) {
4594       if (isCurrent == false) {
4595         infoStream.message("IW", "nrtIsCurrent: infoVersion matches: " + (infos.version == segmentInfos.version) + "; DW changes: " + docWriter.anyChanges() + "; BD changes: "+ bufferedUpdatesStream.any());
4596       }
4597     }
4598     return isCurrent;
4599   }
4600 
4601   synchronized boolean isClosed() {
4602     return closed;
4603   }
4604 
4605   /** Expert: remove any index files that are no longer
4606    *  used.
4607    *
4608    *  <p> IndexWriter normally deletes unused files itself,
4609    *  during indexing.  However, on Windows, which disallows
4610    *  deletion of open files, if there is a reader open on
4611    *  the index then those files cannot be deleted.  This is
4612    *  fine, because IndexWriter will periodically retry
4613    *  the deletion.</p>
4614    *
4615    *  <p> However, IndexWriter doesn't try that often: only
4616    *  on open, close, flushing a new segment, and finishing
4617    *  a merge.  If you don't do any of these actions with your
4618    *  IndexWriter, you'll see the unused files linger.  If
4619    *  that's a problem, call this method to delete them
4620    *  (once you've closed the open readers that were
4621    *  preventing their deletion). 
4622    *  
4623    *  <p> In addition, you can call this method to delete 
4624    *  unreferenced index commits. This might be useful if you 
4625    *  are using an {@link IndexDeletionPolicy} which holds
4626    *  onto index commits until some criteria are met, but those
4627    *  commits are no longer needed. Otherwise, those commits will
4628    *  be deleted the next time commit() is called.
4629    */
4630   public synchronized void deleteUnusedFiles() throws IOException {
4631     ensureOpen(false);
4632     deleter.deletePendingFiles();
4633     deleter.revisitPolicy();
4634   }
4635 
4636   private synchronized void deletePendingFiles() throws IOException {
4637     deleter.deletePendingFiles();
4638   }
4639   
4640   /**
4641    * NOTE: this method creates a compound file for all files returned by
4642    * info.files(). Although, in general, those files may include separate norms
4643    * and deletion files, the SegmentInfo must not reference such files when this
4644    * method is called, because they are not allowed within a compound file.
4645    */
4646   final void createCompoundFile(InfoStream infoStream, TrackingDirectoryWrapper directory, final SegmentInfo info, IOContext context) throws IOException {
4647 
4648     // maybe this check is not needed, but why take the risk?
4649     if (!directory.getCreatedFiles().isEmpty()) {
4650       throw new IllegalStateException("pass a clean trackingdir for CFS creation");
4651     }
4652     
4653     if (infoStream.isEnabled("IW")) {
4654       infoStream.message("IW", "create compound file");
4655     }
4656     // Now merge all added files    
4657     boolean success = false;
4658     try {
4659       info.getCodec().compoundFormat().write(directory, info, context);
4660       success = true;
4661     } finally {
4662       if (!success) {
4663         // Safe: these files must exist
4664         deleteNewFiles(directory.getCreatedFiles());
4665       }
4666     }
4667 
4668     // Replace all previous files with the CFS/CFE files:
4669     info.setFiles(new HashSet<>(directory.getCreatedFiles()));
4670   }
4671   
4672   /**
4673    * Tries to delete the given files if unreferenced
4674    * @param files the files to delete
4675    * @throws IOException if an {@link IOException} occurs
4676    * @see IndexFileDeleter#deleteNewFiles(Collection)
4677    */
4678   synchronized final void deleteNewFiles(Collection<String> files) throws IOException {
4679     deleter.deleteNewFiles(files);
4680   }
4681   
4682   /**
4683    * Cleans up residual files from a segment that could not be entirely flushed due to an error
4684    */
4685   synchronized final void flushFailed(SegmentInfo info) throws IOException {
4686     // TODO: this really should be a tragic
4687     Collection<String> files;
4688     try {
4689       files = info.files();
4690     } catch (IllegalStateException ise) {
4691       // OK
4692       files = null;
4693     }
4694     if (files != null) {
4695       deleter.deleteNewFiles(files);
4696     }
4697   }
4698   
4699   final int purge(boolean forced) throws IOException {
4700     return docWriter.purgeBuffer(this, forced);
4701   }
4702 
4703   final void applyDeletesAndPurge(boolean forcePurge) throws IOException {
4704     try {
4705       purge(forcePurge);
4706     } finally {
4707       if (applyAllDeletesAndUpdates()) {
4708         maybeMerge(config.getMergePolicy(), MergeTrigger.SEGMENT_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
4709       }
4710       flushCount.incrementAndGet();
4711     }
4712   }
4713   
4714   final void doAfterSegmentFlushed(boolean triggerMerge, boolean forcePurge) throws IOException {
4715     try {
4716       purge(forcePurge);
4717     } finally {
4718       if (triggerMerge) {
4719         maybeMerge(config.getMergePolicy(), MergeTrigger.SEGMENT_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
4720       }
4721     }
4722   }
4723   
4724   synchronized void incRefDeleter(SegmentInfos segmentInfos) throws IOException {
4725     ensureOpen();
4726     deleter.incRef(segmentInfos, false);
4727     if (infoStream.isEnabled("IW")) {
4728       infoStream.message("IW", "incRefDeleter for NRT reader version=" + segmentInfos.getVersion() + " segments=" + segString(segmentInfos));
4729     }
4730   }
4731   
4732   synchronized void decRefDeleter(SegmentInfos segmentInfos) throws IOException {
4733     ensureOpen();
4734     deleter.decRef(segmentInfos);
4735     if (infoStream.isEnabled("IW")) {
4736       infoStream.message("IW", "decRefDeleter for NRT reader version=" + segmentInfos.getVersion() + " segments=" + segString(segmentInfos));
4737     }
4738   }
4739   
4740   private boolean processEvents(boolean triggerMerge, boolean forcePurge) throws IOException {
4741     return processEvents(eventQueue, triggerMerge, forcePurge);
4742   }
4743   
4744   private boolean processEvents(Queue<Event> queue, boolean triggerMerge, boolean forcePurge) throws IOException {
4745     boolean processed = false;
4746     if (tragedy == null) {
4747       Event event;
4748       while((event = queue.poll()) != null)  {
4749         processed = true;
4750         event.process(this, triggerMerge, forcePurge);
4751       }
4752     }
4753     return processed;
4754   }
4755   
4756   /**
4757    * Interface for internal atomic events. See {@link DocumentsWriter} for details. Events are executed concurrently and no ordering between them is guaranteed.
4758    * Each event may only rely on actions being serialized within its own process method; anything that must happen before or after a given action must be
4759    * encoded inside the {@link #process(IndexWriter, boolean, boolean)} method.
4760    *
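        * <p>A minimal sketch of an event implementation (the action shown is
        * illustrative only):
        *
        * <pre class="prettyprint">
        * Event example = new Event() {
        *   &#64;Override
        *   public void process(IndexWriter writer, boolean triggerMerge, boolean clearBuffers) throws IOException {
        *     writer.deleteUnusedFiles(); // any writer-side work goes here
        *   }
        * };
        * </pre>
        *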
4761    */
4762   static interface Event {
4763     
4764     /**
4765      * Processes the event. This method is called by the {@link IndexWriter}
4766      * passed as the first argument.
4767      * 
4768      * @param writer
4769      *          the {@link IndexWriter} that executes the event.
4770      * @param triggerMerge
4771      *          <code>true</code> iff this event is allowed to trigger segment merges
4772      * @param clearBuffers
4773      *          <code>true</code> iff this event should clear all buffers associated with the event.
4774      * @throws IOException
4775      *           if an {@link IOException} occurs
4776      */
4777     void process(IndexWriter writer, boolean triggerMerge, boolean clearBuffers) throws IOException;
4778   }
4779 
4780   /** Used only by asserts: returns true if the file exists
4781    *  (can be opened), false if it cannot be opened, and
4782    *  (unlike Java's File.exists) throws IOException if
4783    *  there's some unexpected error. */
4784   static boolean slowFileExists(Directory dir, String fileName) throws IOException {
4785     try {
4786       dir.openInput(fileName, IOContext.DEFAULT).close();
4787       return true;
4788     } catch (NoSuchFileException | FileNotFoundException e) {
4789       return false;
4790     }
4791   }
4792 
4793   /** Anything that will add N docs to the index should reserve them first to
4794    *  make sure the addition is allowed.  This will throw {@code
4795    *  IllegalArgumentException} if it would exceed the maximum allowed doc count. */
4796   private void reserveDocs(long addedNumDocs) {
4797     assert addedNumDocs >= 0;
4798     if (pendingNumDocs.addAndGet(addedNumDocs) > actualMaxDocs) {
4799       // Reserve failed: put the docs back and throw exc:
4800       pendingNumDocs.addAndGet(-addedNumDocs);
4801       tooManyDocs(addedNumDocs);
4802     }
4803   }
4804 
4805   /** Does a best-effort check that the current index would accept this many additional docs, but does not actually reserve them.
4806    *
4807    * @throws IllegalArgumentException if there would be too many docs */
4808   private void testReserveDocs(long addedNumDocs) {
4809     assert addedNumDocs >= 0;
4810     if (pendingNumDocs.get() + addedNumDocs > actualMaxDocs) {
4811       tooManyDocs(addedNumDocs);
4812     }
4813   }
4814 
4815   private void tooManyDocs(long addedNumDocs) {
4816     assert addedNumDocs >= 0;
4817     throw new IllegalArgumentException("number of documents in the index cannot exceed " + actualMaxDocs + " (current document count is " + pendingNumDocs.get() + "; added numDocs is " + addedNumDocs + ")");
4818   }
4819 
4820   /** Wraps the incoming {@link Directory} so that we assign a per-thread
4821    *  {@link MergeRateLimiter} to all created {@link IndexOutput}s. */
4822   private Directory addMergeRateLimiters(Directory in) {
4823     return new FilterDirectory(in) {
4824       @Override
4825       public IndexOutput createOutput(String name, IOContext context) throws IOException {
4826         ensureOpen();
4827 
4828         // Paranoia defense: if this trips we have a bug somewhere...
4829         IndexWriter.this.ensureOpen(false);
4830 
4831         // This Directory is only supposed to be used during merging,
4832         // so all writes should have MERGE context, else there is a bug 
4833         // somewhere that is failing to pass down the right IOContext:
4834         assert context.context == IOContext.Context.MERGE: "got context=" + context.context;
4835 
4836         MergeRateLimiter rateLimiter = rateLimiters.get();
4837         assert rateLimiter != null;
4838 
4839         return new RateLimitedIndexOutput(rateLimiter, in.createOutput(name, context));
4840       }
4841     };
4842   }
4843 }